mlx5-updates-2019-11-12

Merge tag 'mlx5-updates-2019-11-12' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:
====================
mlx5-updates-2019-11-12

1) Merge mlx5-next for devlink reload and flowtable offloads dependencies
2) Devlink reload support
3) TC Flowtable offloads
4) Misc cleanup
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit f97d139aaa
@@ -154,6 +154,27 @@ User command examples:
       values:
          cmode runtime value smfs
 
+enable_roce: RoCE enablement state
+----------------------------------
+RoCE enablement state controls driver support for RoCE traffic.
+When RoCE is disabled, there is no gid table, only raw ethernet QPs are supported and traffic on the well known UDP RoCE port is handled as raw ethernet traffic.
+
+To change RoCE enablement state a user must change the driverinit cmode value and run devlink reload.
+
+User command examples:
+
+- Disable RoCE::
+
+    $ devlink dev param set pci/0000:06:00.0 name enable_roce value false cmode driverinit
+    $ devlink dev reload pci/0000:06:00.0
+
+- Read RoCE enablement state::
+
+    $ devlink dev param show pci/0000:06:00.0 name enable_roce
+      pci/0000:06:00.0:
+        name enable_roce type generic
+          values:
+            cmode driverinit value true
+
 Devlink health reporters
 ========================
@@ -0,0 +1,17 @@
+flow_steering_mode    [DEVICE, DRIVER-SPECIFIC]
+                      Controls the flow steering mode of the driver.
+                      Two modes are supported:
+                      1. 'dmfs' - Device managed flow steering.
+                      2. 'smfs' - Software/Driver managed flow steering.
+                      In DMFS mode, the HW steering entities are created and
+                      managed through the Firmware.
+                      In SMFS mode, the HW steering entities are created and
+                      managed by the driver directly into Hardware
+                      without firmware intervention.
+                      Type: String
+                      Configuration mode: runtime
+
+enable_roce           [DEVICE, GENERIC]
+                      Enable handling of RoCE traffic in the device.
+                      Enabled by default.
+                      Configuration mode: driverinit
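The runtime parameter documented above can be exercised with the stock devlink tool; a usage sketch, in which the PCI address is illustrative::

    $ devlink dev param set pci/0000:06:00.0 name flow_steering_mode value smfs cmode runtime
    $ devlink dev param show pci/0000:06:00.0 name flow_steering_mode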
@@ -65,3 +65,7 @@ reset_dev_on_drv_probe [DEVICE, GENERIC]
                       Reset only if device firmware can be found in the
                       filesystem.
                       Type: u8
+
+enable_roce           [DEVICE, GENERIC]
+                      Enable handling of RoCE traffic in the device.
+                      Type: Boolean
@@ -35,7 +35,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
     int vport_index;
 
     if (rep->vport == MLX5_VPORT_UPLINK)
-        profile = &uplink_rep_profile;
+        profile = &raw_eth_profile;
     else
         return mlx5_ib_set_vport_rep(dev, rep);
 
@@ -10,7 +10,7 @@
 #include "mlx5_ib.h"
 
 #ifdef CONFIG_MLX5_ESWITCH
-extern const struct mlx5_ib_profile uplink_rep_profile;
+extern const struct mlx5_ib_profile raw_eth_profile;
 
 u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw);
 struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
@@ -1031,7 +1031,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
     if (MLX5_CAP_GEN(mdev, cd))
         props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
 
-    if (!mlx5_core_is_pf(mdev))
+    if (mlx5_core_is_vf(mdev))
         props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
 
     if (mlx5_ib_port_link_layer(ibdev, 1) ==
@@ -5145,8 +5145,7 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
     immutable->pkey_tbl_len = attr.pkey_tbl_len;
     immutable->gid_tbl_len = attr.gid_tbl_len;
     immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
-    if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
-        immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+    immutable->max_mad_size = IB_MGMT_MAD_SIZE;
 
     return 0;
 }
@@ -5249,11 +5248,9 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
 {
     int err;
 
-    if (MLX5_CAP_GEN(dev->mdev, roce)) {
-        err = mlx5_nic_vport_enable_roce(dev->mdev);
-        if (err)
-            return err;
-    }
+    err = mlx5_nic_vport_enable_roce(dev->mdev);
+    if (err)
+        return err;
 
     err = mlx5_eth_lag_init(dev);
     if (err)
@@ -5262,8 +5259,7 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
     return 0;
 
 err_disable_roce:
-    if (MLX5_CAP_GEN(dev->mdev, roce))
-        mlx5_nic_vport_disable_roce(dev->mdev);
+    mlx5_nic_vport_disable_roce(dev->mdev);
 
     return err;
 }
@@ -5271,8 +5267,7 @@ err_disable_roce:
 static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
 {
     mlx5_eth_lag_cleanup(dev);
-    if (MLX5_CAP_GEN(dev->mdev, roce))
-        mlx5_nic_vport_disable_roce(dev->mdev);
+    mlx5_nic_vport_disable_roce(dev->mdev);
 }
 
 struct mlx5_ib_counter {
@@ -6444,7 +6439,7 @@ static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
     .query_port = mlx5_ib_rep_query_port,
 };
 
-static int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
 {
     ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
     return 0;
@@ -6484,7 +6479,7 @@ static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
     mlx5_remove_netdev_notifier(dev, port_num);
 }
 
-static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_raw_eth_roce_init(struct mlx5_ib_dev *dev)
 {
     struct mlx5_core_dev *mdev = dev->mdev;
     enum rdma_link_layer ll;
@@ -6500,7 +6495,7 @@ static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
     return err;
 }
 
-static void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_raw_eth_roce_cleanup(struct mlx5_ib_dev *dev)
 {
     mlx5_ib_stage_common_roce_cleanup(dev);
 }
@@ -6807,7 +6802,7 @@ static const struct mlx5_ib_profile pf_profile = {
                  mlx5_ib_stage_delay_drop_cleanup),
 };
 
-const struct mlx5_ib_profile uplink_rep_profile = {
+const struct mlx5_ib_profile raw_eth_profile = {
     STAGE_CREATE(MLX5_IB_STAGE_INIT,
                  mlx5_ib_stage_init_init,
                  mlx5_ib_stage_init_cleanup),
@@ -6818,11 +6813,11 @@ const struct mlx5_ib_profile uplink_rep_profile = {
                  mlx5_ib_stage_caps_init,
                  NULL),
     STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
-                 mlx5_ib_stage_rep_non_default_cb,
+                 mlx5_ib_stage_raw_eth_non_default_cb,
                  NULL),
     STAGE_CREATE(MLX5_IB_STAGE_ROCE,
-                 mlx5_ib_stage_rep_roce_init,
-                 mlx5_ib_stage_rep_roce_cleanup),
+                 mlx5_ib_stage_raw_eth_roce_init,
+                 mlx5_ib_stage_raw_eth_roce_cleanup),
     STAGE_CREATE(MLX5_IB_STAGE_SRQ,
                  mlx5_init_srq_table,
                  mlx5_cleanup_srq_table),
@@ -6898,6 +6893,7 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
 
 static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 {
+    const struct mlx5_ib_profile *profile;
     enum rdma_link_layer ll;
     struct mlx5_ib_dev *dev;
     int port_type_cap;
@@ -6933,7 +6929,12 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
     dev->mdev = mdev;
     dev->num_ports = num_ports;
 
-    return __mlx5_ib_add(dev, &pf_profile);
+    if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev))
+        profile = &raw_eth_profile;
+    else
+        profile = &pf_profile;
+
+    return __mlx5_ib_add(dev, profile);
 }
 
 static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
@@ -85,6 +85,22 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
     return 0;
 }
 
+static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
+                                    struct netlink_ext_ack *extack)
+{
+    struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+    return mlx5_unload_one(dev, false);
+}
+
+static int mlx5_devlink_reload_up(struct devlink *devlink,
+                                  struct netlink_ext_ack *extack)
+{
+    struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+    return mlx5_load_one(dev, false);
+}
+
 static const struct devlink_ops mlx5_devlink_ops = {
 #ifdef CONFIG_MLX5_ESWITCH
     .eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
@@ -96,6 +112,8 @@ static const struct devlink_ops mlx5_devlink_ops = {
 #endif
     .flash_update = mlx5_devlink_flash_update,
     .info_get = mlx5_devlink_info_get,
+    .reload_down = mlx5_devlink_reload_down,
+    .reload_up = mlx5_devlink_reload_up,
 };
 
 struct devlink *mlx5_devlink_alloc(void)
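With reload_down/reload_up wired into the ops above, a userspace-triggered reload re-applies driverinit parameters. An illustrative sequence, mirroring the documentation earlier in this series (PCI address illustrative)::

    $ devlink dev param set pci/0000:06:00.0 name enable_roce value false cmode driverinit
    $ devlink dev reload pci/0000:06:00.0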
@@ -177,12 +195,29 @@ enum mlx5_devlink_param_id {
     MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
 };
 
+static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
+                                             union devlink_param_value val,
+                                             struct netlink_ext_ack *extack)
+{
+    struct mlx5_core_dev *dev = devlink_priv(devlink);
+    bool new_state = val.vbool;
+
+    if (new_state && !MLX5_CAP_GEN(dev, roce)) {
+        NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
+        return -EOPNOTSUPP;
+    }
+
+    return 0;
+}
+
 static const struct devlink_param mlx5_devlink_params[] = {
     DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
                          "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
                          BIT(DEVLINK_PARAM_CMODE_RUNTIME),
                          mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set,
                          mlx5_devlink_fs_mode_validate),
+    DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+                          NULL, NULL, mlx5_devlink_enable_roce_validate),
 };
 
 static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
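The validate callback above reports failures via extack; on a device without the RoCE capability, a set attempt would be rejected roughly as follows (a sketch, not verbatim tool output — the message string comes from the NL_SET_ERR_MSG_MOD call in the hunk above)::

    $ devlink dev param set pci/0000:06:00.0 name enable_roce value true cmode driverinit
    Error: mlx5_core: Device doesn't support RoCE.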
@@ -197,6 +232,11 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
     devlink_param_driverinit_value_set(devlink,
                                        MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
                                        value);
+
+    value.vbool = MLX5_CAP_GEN(dev, roce);
+    devlink_param_driverinit_value_set(devlink,
+                                       DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+                                       value);
 }
 
 int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
@@ -213,6 +253,7 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
         goto params_reg_err;
     mlx5_devlink_set_params_init_values(devlink);
     devlink_params_publish(devlink);
+    devlink_reload_enable(devlink);
     return 0;
 
 params_reg_err:
@@ -222,6 +263,7 @@ params_reg_err:
 
 void mlx5_devlink_unregister(struct devlink *devlink)
 {
+    devlink_reload_disable(devlink);
     devlink_params_unregister(devlink, mlx5_devlink_params,
                               ARRAY_SIZE(mlx5_devlink_params));
     devlink_unregister(devlink);
@@ -77,8 +77,8 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
                                    struct neighbour **out_n,
                                    u8 *out_ttl)
 {
-    struct neighbour *n;
     struct rtable *rt;
+    struct neighbour *n = NULL;
 
 #if IS_ENABLED(CONFIG_INET)
     struct mlx5_core_dev *mdev = priv->mdev;
@@ -138,8 +138,8 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
                                    struct neighbour **out_n,
                                    u8 *out_ttl)
 {
+    struct neighbour *n = NULL;
     struct dst_entry *dst;
-    struct neighbour *n;
 
 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
     int ret;
@@ -212,8 +212,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
     int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
     const struct ip_tunnel_key *tun_key = &e->tun_info->key;
     struct net_device *out_dev, *route_dev;
+    struct neighbour *n = NULL;
     struct flowi4 fl4 = {};
-    struct neighbour *n;
     int ipv4_encap_size;
     char *encap_header;
     u8 nud_state, ttl;
@@ -239,12 +239,15 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
     if (max_encap_size < ipv4_encap_size) {
         mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
                        ipv4_encap_size, max_encap_size);
-        return -EOPNOTSUPP;
+        err = -EOPNOTSUPP;
+        goto out;
     }
 
     encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
-    if (!encap_header)
-        return -ENOMEM;
+    if (!encap_header) {
+        err = -ENOMEM;
+        goto out;
+    }
 
     /* used by mlx5e_detach_encap to lookup a neigh hash table
      * entry in the neigh hash table when a user deletes a rule
@@ -328,9 +331,9 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
     int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
     const struct ip_tunnel_key *tun_key = &e->tun_info->key;
     struct net_device *out_dev, *route_dev;
+    struct neighbour *n = NULL;
     struct flowi6 fl6 = {};
     struct ipv6hdr *ip6h;
-    struct neighbour *n;
     int ipv6_encap_size;
     char *encap_header;
     u8 nud_state, ttl;
@@ -355,12 +358,15 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
     if (max_encap_size < ipv6_encap_size) {
         mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
                        ipv6_encap_size, max_encap_size);
-        return -EOPNOTSUPP;
+        err = -EOPNOTSUPP;
+        goto out;
     }
 
     encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
-    if (!encap_header)
-        return -ENOMEM;
+    if (!encap_header) {
+        err = -ENOMEM;
+        goto out;
+    }
 
     /* used by mlx5e_detach_encap to lookup a neigh hash table
      * entry in the neigh hash table when a user deletes a rule
@@ -63,6 +63,7 @@
 #include "en/xsk/rx.h"
 #include "en/xsk/tx.h"
 #include "en/hv_vhca_stats.h"
+#include "lib/mlx5.h"
 
 
 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
@@ -5427,6 +5428,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
         return NULL;
     }
 
+    dev_net_set(netdev, mlx5_core_net(mdev));
     priv = netdev_priv(netdev);
 
     err = mlx5e_attach(mdev, priv);
@@ -47,6 +47,7 @@
 #include "en/tc_tun.h"
 #include "fs_core.h"
 #include "lib/port_tun.h"
+#include "lib/mlx5.h"
 #define CREATE_TRACE_POINTS
 #include "diag/en_rep_tracepoint.h"
 
@@ -1243,21 +1244,60 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
     }
 }
 
-static LIST_HEAD(mlx5e_rep_block_cb_list);
+static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
+                                 void *cb_priv)
+{
+    struct flow_cls_offload *f = type_data;
+    struct flow_cls_offload cls_flower;
+    struct mlx5e_priv *priv = cb_priv;
+    struct mlx5_eswitch *esw;
+    unsigned long flags;
+    int err;
+
+    flags = MLX5_TC_FLAG(INGRESS) |
+            MLX5_TC_FLAG(ESW_OFFLOAD) |
+            MLX5_TC_FLAG(FT_OFFLOAD);
+    esw = priv->mdev->priv.eswitch;
 
+    switch (type) {
+    case TC_SETUP_CLSFLOWER:
+        if (!mlx5_eswitch_prios_supported(esw) || f->common.chain_index)
+            return -EOPNOTSUPP;
+
+        /* Re-use tc offload path by moving the ft flow to the
+         * reserved ft chain.
+         */
+        memcpy(&cls_flower, f, sizeof(*f));
+        cls_flower.common.chain_index = FDB_FT_CHAIN;
+        err = mlx5e_rep_setup_tc_cls_flower(priv, &cls_flower, flags);
+        memcpy(&f->stats, &cls_flower.stats, sizeof(f->stats));
+        return err;
+    default:
+        return -EOPNOTSUPP;
+    }
+}
+
+static LIST_HEAD(mlx5e_rep_block_tc_cb_list);
+static LIST_HEAD(mlx5e_rep_block_ft_cb_list);
 static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
                               void *type_data)
 {
     struct mlx5e_priv *priv = netdev_priv(dev);
     struct flow_block_offload *f = type_data;
 
+    f->unlocked_driver_cb = true;
+
     switch (type) {
     case TC_SETUP_BLOCK:
-        f->unlocked_driver_cb = true;
         return flow_block_cb_setup_simple(type_data,
-                                          &mlx5e_rep_block_cb_list,
+                                          &mlx5e_rep_block_tc_cb_list,
                                           mlx5e_rep_setup_tc_cb,
                                           priv, priv, true);
+    case TC_SETUP_FT:
+        return flow_block_cb_setup_simple(type_data,
+                                          &mlx5e_rep_block_ft_cb_list,
+                                          mlx5e_rep_setup_ft_cb,
+                                          priv, priv, true);
     default:
         return -EOPNOTSUPP;
     }
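The new TC_SETUP_FT path above is driven from the netfilter flowtable hardware-offload hook rather than from tc directly. A minimal sketch of a setup that would exercise it, assuming two mlx5 representor netdevs ens1f0_0 and ens1f0_1 (names illustrative)::

    # nft add table inet filter
    # nft add flowtable inet filter ft '{ hook ingress priority 0; devices = { ens1f0_0, ens1f0_1 }; flags offload; }'
    # nft add chain inet filter forward '{ type filter hook forward priority 0; policy accept; }'
    # nft add rule inet filter forward ip protocol tcp flow add @ft

Established TCP flows added to the offloaded flowtable are then pushed down this callback as flower rules in the reserved FDB_FT_CHAIN.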
@@ -1877,6 +1917,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
         return -EINVAL;
     }
 
+    dev_net_set(netdev, mlx5_core_net(dev));
     rpriv->netdev = netdev;
     rep->rep_data[REP_ETH].priv = rpriv;
     INIT_LIST_HEAD(&rpriv->vport_sqs_list);
@@ -74,6 +74,7 @@ enum {
     MLX5E_TC_FLOW_FLAG_INGRESS  = MLX5E_TC_FLAG_INGRESS_BIT,
     MLX5E_TC_FLOW_FLAG_EGRESS   = MLX5E_TC_FLAG_EGRESS_BIT,
     MLX5E_TC_FLOW_FLAG_ESWITCH  = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
+    MLX5E_TC_FLOW_FLAG_FT       = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
     MLX5E_TC_FLOW_FLAG_NIC      = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
     MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE,
     MLX5E_TC_FLOW_FLAG_HAIRPIN  = MLX5E_TC_FLOW_BASE + 1,
@@ -276,6 +277,11 @@ static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
     return flow_flag_test(flow, ESWITCH);
 }
 
+static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
+{
+    return flow_flag_test(flow, FT);
+}
+
 static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
 {
     return flow_flag_test(flow, OFFLOADED);
@@ -1074,7 +1080,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
     memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
     slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
     slow_attr->split_count = 0;
-    slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
+    slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
 
     rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
     if (!IS_ERR(rule))
@@ -1091,7 +1097,7 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
     memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
     slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
     slow_attr->split_count = 0;
-    slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
+    slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
     mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
     flow_flag_clear(flow, SLOW);
 }
@@ -1168,7 +1174,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
         return -EOPNOTSUPP;
     }
 
-    if (attr->chain > max_chain) {
+    /* We check chain range only for tc flows.
+     * For ft flows, we checked attr->chain was originally 0 and set it to
+     * FDB_FT_CHAIN which is outside tc range.
+     * See mlx5e_rep_setup_ft_cb().
+     */
+    if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
         NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
         return -EOPNOTSUPP;
     }
@@ -3217,6 +3228,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
     struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
     struct mlx5e_rep_priv *rpriv = priv->ppriv;
     const struct ip_tunnel_info *info = NULL;
+    bool ft_flow = mlx5e_is_ft_flow(flow);
     const struct flow_action_entry *act;
     bool encap = false;
     u32 action = 0;
@@ -3261,6 +3273,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                 return -EINVAL;
             }
 
+            if (ft_flow && out_dev == priv->netdev) {
+                /* Ignore forward to self rules generated
+                 * by adding both mlx5 devs to the flow table
+                 * block on a normal nft offload setup.
+                 */
+                return -EOPNOTSUPP;
+            }
+
             if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
                 NL_SET_ERR_MSG_MOD(extack,
                                    "can't support more output ports, can't offload forwarding");
@@ -3385,6 +3405,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
             u32 dest_chain = act->chain_index;
             u32 max_chain = mlx5_eswitch_get_chain_range(esw);
 
+            if (ft_flow) {
+                NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
+                return -EOPNOTSUPP;
+            }
             if (dest_chain <= attr->chain) {
                 NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
                 return -EOPNOTSUPP;
@@ -3475,6 +3499,8 @@ static void get_flags(int flags, unsigned long *flow_flags)
         __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
     if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
         __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
+    if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
+        __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
 
     *flow_flags = __flow_flags;
 }
@@ -44,7 +44,8 @@ enum {
     MLX5E_TC_FLAG_EGRESS_BIT,
     MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
     MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
-    MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
+    MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
+    MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
 };
 
 #define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
@@ -111,42 +111,32 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
 }
 
 /* E-Switch vport context HW commands */
-static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
-                                        void *in, int inlen)
+int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
+                                          bool other_vport,
+                                          void *in, int inlen)
 {
     u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
 
     MLX5_SET(modify_esw_vport_context_in, in, opcode,
              MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
     MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
-    MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+    MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
     return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 
-int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
-                                          void *in, int inlen)
-{
-    return modify_esw_vport_context_cmd(esw->dev, vport, in, inlen);
-}
-
-static int query_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
-                                       void *out, int outlen)
+int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
+                                         bool other_vport,
+                                         void *out, int outlen)
 {
     u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
 
     MLX5_SET(query_esw_vport_context_in, in, opcode,
              MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
     MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
-    MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+    MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
     return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }
 
-int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
-                                         void *out, int outlen)
-{
-    return query_esw_vport_context_cmd(esw->dev, vport, out, outlen);
-}
-
 static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
                                   u16 vlan, u8 qos, u8 set_flags)
 {
@@ -179,7 +169,8 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
     MLX5_SET(modify_esw_vport_context_in, in,
              field_select.vport_cvlan_insert, 1);
 
-    return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
+    return mlx5_eswitch_modify_esw_vport_context(dev, vport, true,
+                                                 in, sizeof(in));
 }
 
 /* E-Switch FDB */
@@ -452,6 +443,13 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
     return err;
 }
 
+static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
+{
+    esw_cleanup_vepa_rules(esw);
+    esw_destroy_legacy_fdb_table(esw);
+    esw_destroy_legacy_vepa_table(esw);
+}
+
 #define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
                                         MLX5_VPORT_MC_ADDR_CHANGE | \
                                         MLX5_VPORT_PROMISC_CHANGE)
@@ -464,15 +462,10 @@ static int esw_legacy_enable(struct mlx5_eswitch *esw)
     if (ret)
         return ret;
 
-    mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
-    return 0;
-}
-
-static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
-{
-    esw_cleanup_vepa_rules(esw);
-    esw_destroy_legacy_fdb_table(esw);
-    esw_destroy_legacy_vepa_table(esw);
+    ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
+    if (ret)
+        esw_destroy_legacy_table(esw);
+    return ret;
 }
 
 static void esw_legacy_disable(struct mlx5_eswitch *esw)
@@ -501,7 +494,7 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
     /* Skip mlx5_mpfs_add_mac for eswitch_managers,
      * it is already done by its netdev in mlx5e_execute_l2_action
      */
-    if (esw->manager_vport == vport)
+    if (mlx5_esw_is_manager_vport(esw, vport))
         goto fdb_add;
 
     err = mlx5_mpfs_add_mac(esw->dev, mac);
@@ -530,10 +523,10 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
     u16 vport = vaddr->vport;
     int err = 0;
 
-    /* Skip mlx5_mpfs_del_mac for eswitch managerss,
+    /* Skip mlx5_mpfs_del_mac for eswitch managers,
      * it is already done by its netdev in mlx5e_execute_l2_action
      */
-    if (!vaddr->mpfs || esw->manager_vport == vport)
+    if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport))
         goto fdb_del;
 
     err = mlx5_mpfs_del_mac(esw->dev, mac);
@@ -1040,14 +1033,15 @@ out:
 void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
                                     struct mlx5_vport *vport)
 {
-    if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
+    if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
         mlx5_del_flow_rules(vport->egress.allowed_vlan);
+        vport->egress.allowed_vlan = NULL;
+    }
 
-    if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
-        mlx5_del_flow_rules(vport->egress.drop_rule);
-
-    vport->egress.allowed_vlan = NULL;
-    vport->egress.drop_rule = NULL;
+    if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_rule)) {
+        mlx5_del_flow_rules(vport->egress.legacy.drop_rule);
+        vport->egress.legacy.drop_rule = NULL;
+    }
 }
 
 void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
@@ -1067,57 +1061,21 @@ void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
     vport->egress.acl = NULL;
 }
 
-int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
-                                 struct mlx5_vport *vport)
+static int
+esw_vport_create_legacy_ingress_acl_groups(struct mlx5_eswitch *esw,
+                                           struct mlx5_vport *vport)
 {
     int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
     struct mlx5_core_dev *dev = esw->dev;
-    struct mlx5_flow_namespace *root_ns;
-    struct mlx5_flow_table *acl;
     struct mlx5_flow_group *g;
     void *match_criteria;
     u32 *flow_group_in;
-    /* The ingress acl table contains 4 groups
-     * (2 active rules at the same time -
-     *      1 allow rule from one of the first 3 groups.
-     *      1 drop rule from the last group):
-     * 1)Allow untagged traffic with smac=original mac.
-     * 2)Allow untagged traffic.
-     * 3)Allow traffic with smac=original mac.
-     * 4)Drop all other traffic.
-     */
-    int table_size = 4;
-    int err = 0;
-
-    if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
-        return -EOPNOTSUPP;
-
-    if (!IS_ERR_OR_NULL(vport->ingress.acl))
-        return 0;
-
-    esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
-              vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
-
-    root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
-            mlx5_eswitch_vport_num_to_index(esw, vport->vport));
-    if (!root_ns) {
-        esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
-        return -EOPNOTSUPP;
-    }
+    int err;
 
     flow_group_in = kvzalloc(inlen, GFP_KERNEL);
     if (!flow_group_in)
         return -ENOMEM;
 
-    acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
-    if (IS_ERR(acl)) {
-        err = PTR_ERR(acl);
-        esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
-                 vport->vport, err);
-        goto out;
-    }
-    vport->ingress.acl = acl;
-
     match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
 
     MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1127,14 +1085,14 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
     MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
     MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
 
-    g = mlx5_create_flow_group(acl, flow_group_in);
+    g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
     if (IS_ERR(g)) {
         err = PTR_ERR(g);
-        esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
+        esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
                  vport->vport, err);
-        goto out;
+        goto spoof_err;
     }
-    vport->ingress.allow_untagged_spoofchk_grp = g;
+    vport->ingress.legacy.allow_untagged_spoofchk_grp = g;
 
     memset(flow_group_in, 0, inlen);
     MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1142,14 +1100,14 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
     MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
     MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
 
-    g = mlx5_create_flow_group(acl, flow_group_in);
+    g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
     if (IS_ERR(g)) {
         err = PTR_ERR(g);
-        esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
+        esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
                  vport->vport, err);
-        goto out;
+        goto untagged_err;
     }
-    vport->ingress.allow_untagged_only_grp = g;
+    vport->ingress.legacy.allow_untagged_only_grp = g;
 
     memset(flow_group_in, 0, inlen);
     MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1158,108 +1116,178 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
     MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
     MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
 
-    g = mlx5_create_flow_group(acl, flow_group_in);
+    g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
     if (IS_ERR(g)) {
         err = PTR_ERR(g);
-        esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
+        esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
                  vport->vport, err);
-        goto out;
+        goto allow_spoof_err;
     }
-    vport->ingress.allow_spoofchk_only_grp = g;
+    vport->ingress.legacy.allow_spoofchk_only_grp = g;
 
     memset(flow_group_in, 0, inlen);
     MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
     MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
 
-    g = mlx5_create_flow_group(acl, flow_group_in);
+    g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
     if (IS_ERR(g)) {
         err = PTR_ERR(g);
-        esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
+        esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
                  vport->vport, err);
-        goto out;
+        goto drop_err;
     }
-    vport->ingress.drop_grp = g;
+    vport->ingress.legacy.drop_grp = g;
+    kvfree(flow_group_in);
+    return 0;
 
-out:
-    if (err) {
-        if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
-            mlx5_destroy_flow_group(
-                    vport->ingress.allow_spoofchk_only_grp);
-        if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
-            mlx5_destroy_flow_group(
-                    vport->ingress.allow_untagged_only_grp);
-        if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
-            mlx5_destroy_flow_group(
-                    vport->ingress.allow_untagged_spoofchk_grp);
-        if (!IS_ERR_OR_NULL(vport->ingress.acl))
-            mlx5_destroy_flow_table(vport->ingress.acl);
-    }
+drop_err:
+    if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
+        mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
+        vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
+    }
+allow_spoof_err:
+    if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
+        mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
+        vport->ingress.legacy.allow_untagged_only_grp = NULL;
+    }
+untagged_err:
+    if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
+        mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
+        vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
+    }
+spoof_err:
     kvfree(flow_group_in);
     return err;
 }
 
+int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
+                                       struct mlx5_vport *vport, int table_size)
+{
+    struct mlx5_core_dev *dev = esw->dev;
+    struct mlx5_flow_namespace *root_ns;
+    struct mlx5_flow_table *acl;
+    int vport_index;
+    int err;
+
+    if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+        return -EOPNOTSUPP;
+
+    esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
+              vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
+
+    vport_index = mlx5_eswitch_vport_num_to_index(esw, vport->vport);
+    root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+                                                vport_index);
+    if (!root_ns) {
+        esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n",
+                 vport->vport);
+        return -EOPNOTSUPP;
+    }
+
+    acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+    if (IS_ERR(acl)) {
+        err = PTR_ERR(acl);
+        esw_warn(dev, "vport[%d] ingress create flow Table, err(%d)\n",
+                 vport->vport, err);
+        return err;
+    }
+    vport->ingress.acl = acl;
+    return 0;
+}
+
+void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport)
+{
+    if (!vport->ingress.acl)
+        return;
+
+    mlx5_destroy_flow_table(vport->ingress.acl);
+    vport->ingress.acl = NULL;
+}
+
 void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
                                      struct mlx5_vport *vport)
 {
-    if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
-        mlx5_del_flow_rules(vport->ingress.drop_rule);
+    if (vport->ingress.legacy.drop_rule) {
+        mlx5_del_flow_rules(vport->ingress.legacy.drop_rule);
+        vport->ingress.legacy.drop_rule = NULL;
+    }
 
-    if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
+    if (vport->ingress.allow_rule) {
         mlx5_del_flow_rules(vport->ingress.allow_rule);
-
-    vport->ingress.drop_rule = NULL;
-    vport->ingress.allow_rule = NULL;
-
-    esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+        vport->ingress.allow_rule = NULL;
+    }
 }
 
-void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
-                                   struct mlx5_vport *vport)
+static void esw_vport_disable_legacy_ingress_acl(struct mlx5_eswitch *esw,
                                                  struct mlx5_vport *vport)
 {
-    if (IS_ERR_OR_NULL(vport->ingress.acl))
+    if (!vport->ingress.acl)
         return;
 
     esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
 
     esw_vport_cleanup_ingress_rules(esw, vport);
-    mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
-    mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
-    mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
-    mlx5_destroy_flow_group(vport->ingress.drop_grp);
-    mlx5_destroy_flow_table(vport->ingress.acl);
-    vport->ingress.acl = NULL;
-    vport->ingress.drop_grp = NULL;
-    vport->ingress.allow_spoofchk_only_grp = NULL;
-    vport->ingress.allow_untagged_only_grp = NULL;
-    vport->ingress.allow_untagged_spoofchk_grp = NULL;
+    if (vport->ingress.legacy.allow_spoofchk_only_grp) {
+        mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
+        vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
+    }
+    if (vport->ingress.legacy.allow_untagged_only_grp) {
+        mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
+        vport->ingress.legacy.allow_untagged_only_grp = NULL;
+    }
+    if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
+        mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
+        vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
+    }
+    if (vport->ingress.legacy.drop_grp) {
+        mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
+        vport->ingress.legacy.drop_grp = NULL;
+    }
+    esw_vport_destroy_ingress_acl_table(vport);
 }
 
 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
                                     struct mlx5_vport *vport)
 {
-    struct mlx5_fc *counter = vport->ingress.drop_counter;
+    struct mlx5_fc *counter = vport->ingress.legacy.drop_counter;
     struct mlx5_flow_destination drop_ctr_dst = {0};
     struct mlx5_flow_destination *dst = NULL;
     struct mlx5_flow_act flow_act = {0};
-    struct mlx5_flow_spec *spec;
+    struct mlx5_flow_spec *spec = NULL;
     int dest_num = 0;
     int err = 0;
     u8 *smac_v;
 
+    /* The ingress acl table contains 4 groups
+     * (2 active rules at the same time -
+     *      1 allow rule from one of the first 3 groups.
+     *      1 drop rule from the last group):
+     * 1)Allow untagged traffic with smac=original mac.
+     * 2)Allow untagged traffic.
+     * 3)Allow traffic with smac=original mac.
+     * 4)Drop all other traffic.
+     */
+    int table_size = 4;
+
     esw_vport_cleanup_ingress_rules(esw, vport);
 
     if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
-        esw_vport_disable_ingress_acl(esw, vport);
+        esw_vport_disable_legacy_ingress_acl(esw, vport);
         return 0;
     }
 
-    err = esw_vport_enable_ingress_acl(esw, vport);
-    if (err) {
-        mlx5_core_warn(esw->dev,
-                       "failed to enable ingress acl (%d) on vport[%d]\n",
-                       err, vport->vport);
-        return err;
+    if (!vport->ingress.acl) {
+        err = esw_vport_create_ingress_acl_table(esw, vport, table_size);
+        if (err) {
+            esw_warn(esw->dev,
                     "vport[%d] enable ingress acl err (%d)\n",
                     err, vport->vport);
+            return err;
+        }
+
+        err = esw_vport_create_legacy_ingress_acl_groups(esw, vport);
+        if (err)
+            goto out;
     }
 
     esw_debug(esw->dev,
@@ -1309,21 +1337,59 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
         dst = &drop_ctr_dst;
         dest_num++;
     }
-    vport->ingress.drop_rule =
+    vport->ingress.legacy.drop_rule =
         mlx5_add_flow_rules(vport->ingress.acl, spec,
                             &flow_act, dst, dest_num);
-    if (IS_ERR(vport->ingress.drop_rule)) {
-        err = PTR_ERR(vport->ingress.drop_rule);
+    if (IS_ERR(vport->ingress.legacy.drop_rule)) {
+        err = PTR_ERR(vport->ingress.legacy.drop_rule);
         esw_warn(esw->dev,
                  "vport[%d] configure ingress drop rule, err(%d)\n",
                  vport->vport, err);
-        vport->ingress.drop_rule = NULL;
+        vport->ingress.legacy.drop_rule = NULL;
         goto out;
     }
+    kvfree(spec);
+    return 0;
 
 out:
-    if (err)
-        esw_vport_cleanup_ingress_rules(esw, vport);
+    esw_vport_disable_legacy_ingress_acl(esw, vport);
     kvfree(spec);
     return err;
 }
 
+int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
+                                          struct mlx5_vport *vport,
+                                          u16 vlan_id, u32 flow_action)
+{
+    struct mlx5_flow_act flow_act = {};
+    struct mlx5_flow_spec *spec;
+    int err = 0;
+
+    if (vport->egress.allowed_vlan)
+        return -EEXIST;
+
+    spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+    if (!spec)
+        return -ENOMEM;
+
+    MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+    MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
+    MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
+    MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vlan_id);
+
+    spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+    flow_act.action = flow_action;
+    vport->egress.allowed_vlan =
+        mlx5_add_flow_rules(vport->egress.acl, spec,
+                            &flow_act, NULL, 0);
+    if (IS_ERR(vport->egress.allowed_vlan)) {
+        err = PTR_ERR(vport->egress.allowed_vlan);
+        esw_warn(esw->dev,
+                 "vport[%d] configure egress vlan rule failed, err(%d)\n",
+                 vport->vport, err);
+        vport->egress.allowed_vlan = NULL;
+    }
+
+    kvfree(spec);
+    return err;
+}
@@ -1331,7 +1397,7 @@ out:
 static int esw_vport_egress_config(struct mlx5_eswitch *esw,
                                    struct mlx5_vport *vport)
 {
-    struct mlx5_fc *counter = vport->egress.drop_counter;
+    struct mlx5_fc *counter = vport->egress.legacy.drop_counter;
     struct mlx5_flow_destination drop_ctr_dst = {0};
     struct mlx5_flow_destination *dst = NULL;
     struct mlx5_flow_act flow_act = {0};
@@ -1358,34 +1424,17 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
              "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
              vport->vport, vport->info.vlan, vport->info.qos);
 
-    spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
-    if (!spec) {
-        err = -ENOMEM;
-        goto out;
-    }
-
     /* Allowed vlan rule */
-    MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
-    MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
-    MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
-    MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
-
-    spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-    flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
-    vport->egress.allowed_vlan =
-        mlx5_add_flow_rules(vport->egress.acl, spec,
-                            &flow_act, NULL, 0);
-    if (IS_ERR(vport->egress.allowed_vlan)) {
-        err = PTR_ERR(vport->egress.allowed_vlan);
-        esw_warn(esw->dev,
-                 "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
-                 vport->vport, err);
-        vport->egress.allowed_vlan = NULL;
-        goto out;
-    }
+    err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, vport->info.vlan,
+                                                MLX5_FLOW_CONTEXT_ACTION_ALLOW);
+    if (err)
+        return err;
 
     /* Drop others rule (star rule) */
-    memset(spec, 0, sizeof(*spec));
+    spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+    if (!spec)
+        goto out;
+
     flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
 
     /* Attach egress drop flow counter */
@@ -1396,15 +1445,15 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
         dst = &drop_ctr_dst;
         dest_num++;
     }
-    vport->egress.drop_rule =
+    vport->egress.legacy.drop_rule =
         mlx5_add_flow_rules(vport->egress.acl, spec,
                             &flow_act, dst, dest_num);
-    if (IS_ERR(vport->egress.drop_rule)) {
-        err = PTR_ERR(vport->egress.drop_rule);
+    if (IS_ERR(vport->egress.legacy.drop_rule)) {
+        err = PTR_ERR(vport->egress.legacy.drop_rule);
         esw_warn(esw->dev,
                  "vport[%d] configure egress drop rule failed, err(%d)\n",
                  vport->vport, err);
-        vport->egress.drop_rule = NULL;
+        vport->egress.legacy.drop_rule = NULL;
     }
 out:
     kvfree(spec);
@@ -1619,7 +1668,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
     u16 vport_num = vport->vport;
     int flags;
 
-    if (esw->manager_vport == vport_num)
+    if (mlx5_esw_is_manager_vport(esw, vport_num))
         return;
 
     mlx5_modify_vport_admin_state(esw->dev,
@@ -1639,66 +1688,112 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
                SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
     modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
                            flags);
-
-    /* Only legacy mode needs ACLs */
-    if (esw->mode == MLX5_ESWITCH_LEGACY) {
-        esw_vport_ingress_config(esw, vport);
-        esw_vport_egress_config(esw, vport);
-    }
 }
 
-static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
+static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
+                                              struct mlx5_vport *vport)
 {
-    struct mlx5_core_dev *dev = vport->dev;
+    int ret;
 
-    if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
-        vport->ingress.drop_counter = mlx5_fc_create(dev, false);
-        if (IS_ERR(vport->ingress.drop_counter)) {
-            esw_warn(dev,
+    /* Only non manager vports need ACL in legacy mode */
+    if (mlx5_esw_is_manager_vport(esw, vport->vport))
+        return 0;
+
+    if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+        MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
+        vport->ingress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
+        if (IS_ERR(vport->ingress.legacy.drop_counter)) {
+            esw_warn(esw->dev,
                      "vport[%d] configure ingress drop rule counter failed\n",
                      vport->vport);
-            vport->ingress.drop_counter = NULL;
+            vport->ingress.legacy.drop_counter = NULL;
         }
     }
 
-    if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
-        vport->egress.drop_counter = mlx5_fc_create(dev, false);
-        if (IS_ERR(vport->egress.drop_counter)) {
-            esw_warn(dev,
+    ret = esw_vport_ingress_config(esw, vport);
+    if (ret)
+        goto ingress_err;
+
+    if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+        MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
+        vport->egress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
+        if (IS_ERR(vport->egress.legacy.drop_counter)) {
+            esw_warn(esw->dev,
                      "vport[%d] configure egress drop rule counter failed\n",
                      vport->vport);
-            vport->egress.drop_counter = NULL;
+            vport->egress.legacy.drop_counter = NULL;
         }
     }
+
+    ret = esw_vport_egress_config(esw, vport);
+    if (ret)
+        goto egress_err;
+
+    return 0;
+
+egress_err:
+    esw_vport_disable_legacy_ingress_acl(esw, vport);
+    mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
+    vport->egress.legacy.drop_counter = NULL;
+
+ingress_err:
+    mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
+    vport->ingress.legacy.drop_counter = NULL;
+    return ret;
 }
 
-static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
+static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
+                               struct mlx5_vport *vport)
 {
-    struct mlx5_core_dev *dev = vport->dev;
-
-    if (vport->ingress.drop_counter)
-        mlx5_fc_destroy(dev, vport->ingress.drop_counter);
-    if (vport->egress.drop_counter)
-        mlx5_fc_destroy(dev, vport->egress.drop_counter);
+    if (esw->mode == MLX5_ESWITCH_LEGACY)
+        return esw_vport_create_legacy_acl_tables(esw, vport);
+    else
+        return esw_vport_create_offloads_acl_tables(esw, vport);
 }
 
-static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
-                             enum mlx5_eswitch_vport_event enabled_events)
+static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw,
+                                                struct mlx5_vport *vport)
+
+{
+    if (mlx5_esw_is_manager_vport(esw, vport->vport))
+        return;
+
+    esw_vport_disable_egress_acl(esw, vport);
+    mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
+    vport->egress.legacy.drop_counter = NULL;
+
+    esw_vport_disable_legacy_ingress_acl(esw, vport);
+    mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
+    vport->ingress.legacy.drop_counter = NULL;
+}
+
+static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
+                                  struct mlx5_vport *vport)
+{
+    if (esw->mode == MLX5_ESWITCH_LEGACY)
+        esw_vport_destroy_legacy_acl_tables(esw, vport);
+    else
+        esw_vport_destroy_offloads_acl_tables(esw, vport);
+}
+
+static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+                            enum mlx5_eswitch_vport_event enabled_events)
 {
     u16 vport_num = vport->vport;
+    int ret;
 
     mutex_lock(&esw->state_lock);
     WARN_ON(vport->enabled);
 
     esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
 
-    /* Create steering drop counters for ingress and egress ACLs */
-    if (vport_num && esw->mode == MLX5_ESWITCH_LEGACY)
-        esw_vport_create_drop_counters(vport);
-
     /* Restore old vport configuration */
     esw_apply_vport_conf(esw, vport);
 
+    ret = esw_vport_setup_acl(esw, vport);
+    if (ret)
+        goto done;
+
     /* Attach vport to the eswitch rate limiter */
     if (esw_vport_enable_qos(esw, vport, vport->info.max_rate,
                              vport->qos.bw_share))
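The legacy ingress/egress ACL tables and drop counters restructured above back the classic per-VF administration knobs in legacy SR-IOV mode. A hedged example of commands that exercise these paths (PF netdev name and VF index are illustrative):

    # ip link set ens1f0 vf 0 vlan 100 qos 2
    # ip link set ens1f0 vf 0 spoofchk on

Setting a VF vlan/qos or enabling spoof checking is what causes esw_vport_ingress_config()/esw_vport_egress_config() to populate the per-vport ACL groups and rules.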
@@ -1711,7 +1806,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
     /* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
      * in smartNIC as it's a vport group manager.
      */
-    if (esw->manager_vport == vport_num ||
+    if (mlx5_esw_is_manager_vport(esw, vport_num) ||
         (!vport_num && mlx5_core_is_ecpf(esw->dev)))
         vport->info.trusted = true;
 
|
@ -1719,7 +1814,9 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
|
|||
|
||||
esw->enabled_vports++;
|
||||
esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
|
||||
done:
|
||||
mutex_unlock(&esw->state_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void esw_disable_vport(struct mlx5_eswitch *esw,
|
||||
|
@@ -1727,18 +1824,16 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
 {
     u16 vport_num = vport->vport;
 
+    mutex_lock(&esw->state_lock);
     if (!vport->enabled)
-        return;
+        goto done;
 
     esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
     /* Mark this vport as disabled to discard new events */
     vport->enabled = false;
 
-    /* Wait for current already scheduled events to complete */
-    flush_workqueue(esw->work_queue);
     /* Disable events from this vport */
     arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
-    mutex_lock(&esw->state_lock);
     /* We don't assume VFs will cleanup after themselves.
      * Calling vport change handler while vport is disabled will cleanup
      * the vport resources.
@@ -1746,17 +1841,18 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
     esw_vport_change_handle_locked(vport);
     vport->enabled_events = 0;
     esw_vport_disable_qos(esw, vport);
-    if (esw->manager_vport != vport_num &&
-        esw->mode == MLX5_ESWITCH_LEGACY) {
+
+    if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+        esw->mode == MLX5_ESWITCH_LEGACY)
         mlx5_modify_vport_admin_state(esw->dev,
                                       MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                                       vport_num, 1,
                                       MLX5_VPORT_ADMIN_STATE_DOWN);
-        esw_vport_disable_egress_acl(esw, vport);
-        esw_vport_disable_ingress_acl(esw, vport);
-        esw_vport_destroy_drop_counters(vport);
-    }
+
+    esw_vport_cleanup_acl(esw, vport);
     esw->enabled_vports--;
+
+done:
     mutex_unlock(&esw->state_lock);
 }
@@ -1770,12 +1866,8 @@ static int eswitch_vport_event(struct notifier_block *nb,
 
     vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
     vport = mlx5_eswitch_get_vport(esw, vport_num);
-    if (IS_ERR(vport))
-        return NOTIFY_OK;
-
-    if (vport->enabled)
+    if (!IS_ERR(vport))
         queue_work(esw->work_queue, &vport->vport_change_handler);
 
     return NOTIFY_OK;
 }
@@ -1846,26 +1938,51 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
 /* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
  * whichever are present on the eswitch.
  */
-void
+int
 mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
                                  enum mlx5_eswitch_vport_event enabled_events)
 {
     struct mlx5_vport *vport;
+    int num_vfs;
+    int ret;
     int i;
 
     /* Enable PF vport */
     vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
-    esw_enable_vport(esw, vport, enabled_events);
+    ret = esw_enable_vport(esw, vport, enabled_events);
+    if (ret)
+        return ret;
 
-    /* Enable ECPF vports */
+    /* Enable ECPF vport */
     if (mlx5_ecpf_vport_exists(esw->dev)) {
         vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
-        esw_enable_vport(esw, vport, enabled_events);
+        ret = esw_enable_vport(esw, vport, enabled_events);
+        if (ret)
+            goto ecpf_err;
     }
 
     /* Enable VF vports */
-    mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
-        esw_enable_vport(esw, vport, enabled_events);
+    mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+        ret = esw_enable_vport(esw, vport, enabled_events);
+        if (ret)
+            goto vf_err;
+    }
+    return 0;
+
+vf_err:
+    num_vfs = i - 1;
+    mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, num_vfs)
+        esw_disable_vport(esw, vport);
+
+    if (mlx5_ecpf_vport_exists(esw->dev)) {
+        vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+        esw_disable_vport(esw, vport);
+    }
+
+ecpf_err:
+    vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+    esw_disable_vport(esw, vport);
+    return ret;
 }
 
 /* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs
@ -2485,12 +2602,12 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
|
|||
if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY)
|
||||
return 0;
|
||||
|
||||
if (vport->egress.drop_counter)
|
||||
mlx5_fc_query(dev, vport->egress.drop_counter,
|
||||
if (vport->egress.legacy.drop_counter)
|
||||
mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
|
||||
&stats->rx_dropped, &bytes);
|
||||
|
||||
if (vport->ingress.drop_counter)
|
||||
mlx5_fc_query(dev, vport->ingress.drop_counter,
|
||||
if (vport->ingress.legacy.drop_counter)
|
||||
mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
|
||||
&stats->tx_dropped, &bytes);
|
||||
|
||||
if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
|
||||
|
|
|
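mlx5_eswitch_enable_pf_vf_vports() now returns an error and unwinds whatever it already enabled, in reverse order: the VFs enabled so far, then the ECPF, then the PF. A minimal user-space sketch of that staged-setup/reverse-unwind idiom; enable_stage(), disable_stage() and enable_all() are invented stand-ins, not driver code::

    #include <stdio.h>

    /* Hypothetical stage handlers standing in for the esw_enable_vport() /
     * esw_disable_vport() pairs in the hunk above. */
    static int enable_stage(int idx)
    {
            printf("enable %d\n", idx);
            return idx == 3 ? -1 : 0;       /* stage 3 fails, to force the unwind */
    }

    static void disable_stage(int idx)
    {
            printf("disable %d\n", idx);
    }

    static int enable_all(int num)
    {
            int i, ret;

            for (i = 0; i < num; i++) {
                    ret = enable_stage(i);
                    if (ret)
                            goto err;       /* stage i failed; undo 0..i-1 */
            }
            return 0;

    err:
            while (--i >= 0)                /* reverse order, like the vf_err: path */
                    disable_stage(i);
            return ret;
    }

    int main(void)
    {
            enable_all(5);
            return 0;
    }
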
@@ -43,6 +43,16 @@
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"

#define FDB_TC_MAX_CHAIN 3
#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1)

/* The index of the last real chain (FT) + 1 as chain zero is valid as well */
#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1)

#define FDB_TC_MAX_PRIO 16
#define FDB_TC_LEVELS_PER_PRIO 2

#ifdef CONFIG_MLX5_ESWITCH

#define MLX5_MAX_UC_PER_VPORT(dev) \

@@ -59,21 +69,22 @@
#define mlx5_esw_has_fwd_fdb(dev) \
        MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define FDB_MAX_CHAIN 3
#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
#define FDB_MAX_PRIO 16

struct vport_ingress {
        struct mlx5_flow_table *acl;
        struct mlx5_flow_group *allow_untagged_spoofchk_grp;
        struct mlx5_flow_group *allow_spoofchk_only_grp;
        struct mlx5_flow_group *allow_untagged_only_grp;
        struct mlx5_flow_group *drop_grp;
        struct mlx5_modify_hdr *modify_metadata;
        struct mlx5_flow_handle *modify_metadata_rule;
        struct mlx5_flow_handle *allow_rule;
        struct mlx5_flow_handle *drop_rule;
        struct mlx5_fc *drop_counter;
        struct mlx5_flow_handle *allow_rule;
        struct {
                struct mlx5_flow_group *allow_spoofchk_only_grp;
                struct mlx5_flow_group *allow_untagged_spoofchk_grp;
                struct mlx5_flow_group *allow_untagged_only_grp;
                struct mlx5_flow_group *drop_grp;
                struct mlx5_flow_handle *drop_rule;
                struct mlx5_fc *drop_counter;
        } legacy;
        struct {
                struct mlx5_flow_group *metadata_grp;
                struct mlx5_modify_hdr *modify_metadata;
                struct mlx5_flow_handle *modify_metadata_rule;
        } offloads;
};

struct vport_egress {

@@ -81,8 +92,10 @@ struct vport_egress {
        struct mlx5_flow_group *allowed_vlans_grp;
        struct mlx5_flow_group *drop_grp;
        struct mlx5_flow_handle *allowed_vlan;
        struct mlx5_flow_handle *drop_rule;
        struct mlx5_fc *drop_counter;
        struct {
                struct mlx5_flow_handle *drop_rule;
                struct mlx5_fc *drop_counter;
        } legacy;
};

struct mlx5_vport_drop_stats {

@@ -139,7 +152,6 @@ enum offloads_fdb_flags {

extern const unsigned int ESW_POOLS[4];

#define PRIO_LEVELS 2
struct mlx5_eswitch_fdb {
        union {
                struct legacy_fdb {

@@ -166,7 +178,7 @@ struct mlx5_eswitch_fdb {
                        struct {
                                struct mlx5_flow_table *fdb;
                                u32 num_rules;
                        } fdb_prio[FDB_MAX_CHAIN + 1][FDB_MAX_PRIO + 1][PRIO_LEVELS];
                        } fdb_prio[FDB_NUM_CHAINS][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO];
                        /* Protects fdb_prio table */
                        struct mutex fdb_prio_lock;

@@ -217,8 +229,8 @@ enum {
struct mlx5_eswitch {
        struct mlx5_core_dev    *dev;
        struct mlx5_nb          nb;
        /* legacy data structures */
        struct mlx5_eswitch_fdb fdb_table;
        /* legacy data structures */
        struct hlist_head       mc_table[MLX5_L2_ADDR_HASH_SIZE];
        struct esw_mc_addr      mc_promisc;
        /* end of legacy */

@@ -251,18 +263,16 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
                                     struct mlx5_vport *vport);
int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
                                 struct mlx5_vport *vport);
int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
                                       struct mlx5_vport *vport,
                                       int table_size);
void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport);
void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
                                    struct mlx5_vport *vport);
int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
                                struct mlx5_vport *vport);
void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
                                  struct mlx5_vport *vport);
void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
                                   struct mlx5_vport *vport);
void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
                                               struct mlx5_vport *vport);
int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
                               u32 rate_mbps);

@@ -292,9 +302,11 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
                                 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
                                          bool other_vport,
                                          void *in, int inlen);
int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
                                         bool other_vport,
                                         void *out, int outlen);

struct mlx5_flow_spec;

@@ -421,6 +433,10 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
                                  u16 vport, u16 vlan, u8 qos, u8 set_flags);

int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
                                          struct mlx5_vport *vport,
                                          u16 vlan_id, u32 flow_action);

static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
                                                       u8 vlan_depth)
{

@@ -459,6 +475,12 @@ static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
                MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
        return esw->manager_vport == vport_num;
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
        return mlx5_core_is_ecpf_esw_manager(dev) ?

@@ -593,11 +615,18 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

void
int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
                                 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
                                     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
                                      struct mlx5_vport *vport);

#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }

@@ -613,10 +642,6 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)

static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}

#define FDB_MAX_CHAIN 1
#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
#define FDB_MAX_PRIO 1

#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */

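The renamed macros pin down the chain index layout: TC chains occupy indices 0..FDB_TC_MAX_CHAIN, the FT chain sits directly after them, FDB_NUM_CHAINS counts the real chains, and FDB_TC_SLOW_PATH_CHAIN is a sentinel one past the array. A compile-time check of that arithmetic, with the macro values copied from the hunk above (the slot struct and main() are just scaffolding)::

    #define FDB_TC_MAX_CHAIN 3
    #define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
    #define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1)
    #define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1)
    #define FDB_TC_MAX_PRIO 16
    #define FDB_TC_LEVELS_PER_PRIO 2

    _Static_assert(FDB_FT_CHAIN == 4, "FT chain follows the last TC chain");
    _Static_assert(FDB_TC_SLOW_PATH_CHAIN == 5, "slow path lives outside the chain array");
    _Static_assert(FDB_NUM_CHAINS == 5, "chains 0..3 are TC, chain 4 is FT");

    /* The fdb_prio table declared above is therefore 5 x 17 x 2 slots. */
    struct fdb_prio_slot { void *fdb; unsigned int num_rules; };
    static struct fdb_prio_slot
            fdb_prio[FDB_NUM_CHAINS][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO];

    int main(void)
    {
            return sizeof(fdb_prio) ? 0 : 1;
    }
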
@@ -75,7 +75,7 @@ bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
        if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
                return FDB_MAX_CHAIN;
                return FDB_TC_MAX_CHAIN;

        return 0;
}

@@ -83,7 +83,7 @@ u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
        if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
                return FDB_MAX_PRIO;
                return FDB_TC_MAX_PRIO;

        return 1;
}

@@ -599,7 +599,7 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
        if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
                return 0;

        err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
        err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
                                                   out, sizeof(out));
        if (err)
                return err;

@@ -618,7 +618,7 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
        MLX5_SET(modify_esw_vport_context_in, in,
                 field_select.fdb_to_vport_reg_c_id, 1);

        return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
        return mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false,
                                                     in, sizeof(in));
}

@@ -927,7 +927,7 @@ esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
        int table_prio, l = 0;
        u32 flags = 0;

        if (chain == FDB_SLOW_PATH_CHAIN)
        if (chain == FDB_TC_SLOW_PATH_CHAIN)
                return esw->fdb_table.offloads.slow_fdb;

        mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

@@ -952,7 +952,7 @@ esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
                flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
                          MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

        table_prio = (chain * FDB_MAX_PRIO) + prio - 1;
        table_prio = prio - 1;

        /* create earlier levels for correct fs_core lookup when
         * connecting tables

@@ -989,7 +989,7 @@ esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
        int l;

        if (chain == FDB_SLOW_PATH_CHAIN)
        if (chain == FDB_TC_SLOW_PATH_CHAIN)
                return;

        mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

@@ -1777,9 +1777,9 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
        flow_act.vlan[0].vid = 0;
        flow_act.vlan[0].prio = 0;

        if (vport->ingress.modify_metadata_rule) {
        if (vport->ingress.offloads.modify_metadata_rule) {
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
                flow_act.modify_hdr = vport->ingress.modify_metadata;
                flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
        }

        vport->ingress.allow_rule =

@@ -1815,11 +1815,11 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
        MLX5_SET(set_action_in, action, data,
                 mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));

        vport->ingress.modify_metadata =
        vport->ingress.offloads.modify_metadata =
                mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
                                         1, action);
        if (IS_ERR(vport->ingress.modify_metadata)) {
                err = PTR_ERR(vport->ingress.modify_metadata);
        if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
                err = PTR_ERR(vport->ingress.offloads.modify_metadata);
                esw_warn(esw->dev,
                         "failed to alloc modify header for vport %d ingress acl (%d)\n",
                         vport->vport, err);

@@ -1827,100 +1827,76 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
        }

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
        flow_act.modify_hdr = vport->ingress.modify_metadata;
        vport->ingress.modify_metadata_rule = mlx5_add_flow_rules(vport->ingress.acl,
                                                                  &spec, &flow_act, NULL, 0);
        if (IS_ERR(vport->ingress.modify_metadata_rule)) {
                err = PTR_ERR(vport->ingress.modify_metadata_rule);
        flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
        vport->ingress.offloads.modify_metadata_rule =
                mlx5_add_flow_rules(vport->ingress.acl,
                                    &spec, &flow_act, NULL, 0);
        if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
                err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
                esw_warn(esw->dev,
                         "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
                         vport->vport, err);
                vport->ingress.modify_metadata_rule = NULL;
                vport->ingress.offloads.modify_metadata_rule = NULL;
                goto out;
        }

out:
        if (err)
                mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata);
                mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
        return err;
}

void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
                                               struct mlx5_vport *vport)
static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
                                                      struct mlx5_vport *vport)
{
        if (vport->ingress.modify_metadata_rule) {
                mlx5_del_flow_rules(vport->ingress.modify_metadata_rule);
                mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata);
        if (vport->ingress.offloads.modify_metadata_rule) {
                mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
                mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);

                vport->ingress.modify_metadata_rule = NULL;
                vport->ingress.offloads.modify_metadata_rule = NULL;
        }
}

static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
                                            struct mlx5_vport *vport)
static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
                                              struct mlx5_vport *vport)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_spec *spec;
        int err = 0;
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        u32 *flow_group_in;
        int ret = 0;

        if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
                return 0;
        flow_group_in = kvzalloc(inlen, GFP_KERNEL);
        if (!flow_group_in)
                return -ENOMEM;

        /* For prio tag mode, there is only 1 FTE:
         * 1) prio tag packets - pop the prio tag VLAN, allow
         * Unmatched traffic is allowed by default
         */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

        esw_vport_cleanup_egress_rules(esw, vport);

        err = esw_vport_enable_egress_acl(esw, vport);
        if (err) {
                mlx5_core_warn(esw->dev,
                               "failed to enable egress acl (%d) on vport[%d]\n",
                               err, vport->vport);
                return err;
        }

        esw_debug(esw->dev,
                  "vport[%d] configure prio tag egress rules\n", vport->vport);

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                err = -ENOMEM;
                goto out_no_mem;
        }

        /* prio tag vlan rule - pop it so VF receives untagged packets */
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
                          MLX5_FLOW_CONTEXT_ACTION_ALLOW;
        vport->egress.allowed_vlan =
                mlx5_add_flow_rules(vport->egress.acl, spec,
                                    &flow_act, NULL, 0);
        if (IS_ERR(vport->egress.allowed_vlan)) {
                err = PTR_ERR(vport->egress.allowed_vlan);
        g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
        if (IS_ERR(g)) {
                ret = PTR_ERR(g);
                esw_warn(esw->dev,
                         "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n",
                         vport->vport, err);
                vport->egress.allowed_vlan = NULL;
                goto out;
                         "Failed to create vport[%d] ingress metadata group, err(%d)\n",
                         vport->vport, ret);
                goto grp_err;
        }

out:
        kvfree(spec);
out_no_mem:
        if (err)
                esw_vport_cleanup_egress_rules(esw, vport);
        return err;
        vport->ingress.offloads.metadata_grp = g;
grp_err:
        kvfree(flow_group_in);
        return ret;
}

static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
                                           struct mlx5_vport *vport)
static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
{
        if (vport->ingress.offloads.metadata_grp) {
                mlx5_destroy_flow_group(vport->ingress.offloads.metadata_grp);
                vport->ingress.offloads.metadata_grp = NULL;
        }
}

static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
                                    struct mlx5_vport *vport)
{
        int err;

@@ -1929,8 +1905,7 @@ static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
                return 0;

        esw_vport_cleanup_ingress_rules(esw, vport);

        err = esw_vport_enable_ingress_acl(esw, vport);
        err = esw_vport_create_ingress_acl_table(esw, vport, 1);
        if (err) {
                esw_warn(esw->dev,
                         "failed to enable ingress acl (%d) on vport[%d]\n",

@@ -1938,25 +1913,65 @@ static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
                return err;
        }

        err = esw_vport_create_ingress_acl_group(esw, vport);
        if (err)
                goto group_err;

        esw_debug(esw->dev,
                  "vport[%d] configure ingress rules\n", vport->vport);

        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
                err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
                if (err)
                        goto out;
                        goto metadata_err;
        }

        if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
            mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
                err = esw_vport_ingress_prio_tag_config(esw, vport);
                if (err)
                        goto out;
                        goto prio_tag_err;
        }
        return 0;

out:
prio_tag_err:
        esw_vport_del_ingress_acl_modify_metadata(esw, vport);
metadata_err:
        esw_vport_cleanup_ingress_rules(esw, vport);
        esw_vport_destroy_ingress_acl_group(vport);
group_err:
        esw_vport_destroy_ingress_acl_table(vport);
        return err;
}

static int esw_vport_egress_config(struct mlx5_eswitch *esw,
                                   struct mlx5_vport *vport)
{
        int err;

        if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
                return 0;

        esw_vport_cleanup_egress_rules(esw, vport);

        err = esw_vport_enable_egress_acl(esw, vport);
        if (err)
                esw_vport_disable_ingress_acl(esw, vport);
                return err;

        /* For prio tag mode, there is only 1 FTE:
         * 1) prio tag packets - pop the prio tag VLAN, allow
         * Unmatched traffic is allowed by default
         */
        esw_debug(esw->dev,
                  "vport[%d] configure prio tag egress rules\n", vport->vport);

        /* prio tag vlan rule - pop it so VF receives untagged packets */
        err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0,
                                                    MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
                                                    MLX5_FLOW_CONTEXT_ACTION_ALLOW);
        if (err)
                esw_vport_disable_egress_acl(esw, vport);

        return err;
}

@@ -1980,54 +1995,59 @@ esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
        return true;
}

static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
                                     struct mlx5_vport *vport)
{
        int err;

        err = esw_vport_ingress_config(esw, vport);
        if (err)
                return err;

        if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
                err = esw_vport_egress_config(esw, vport);
                if (err) {
                        esw_vport_del_ingress_acl_modify_metadata(esw, vport);
                        esw_vport_cleanup_ingress_rules(esw, vport);
                        esw_vport_destroy_ingress_acl_table(vport);
                }
        }
        return err;
}

void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
                                      struct mlx5_vport *vport)
{
        esw_vport_disable_egress_acl(esw, vport);
        esw_vport_del_ingress_acl_modify_metadata(esw, vport);
        esw_vport_cleanup_ingress_rules(esw, vport);
        esw_vport_destroy_ingress_acl_group(vport);
        esw_vport_destroy_ingress_acl_table(vport);
}

static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
        struct mlx5_vport *vport;
        int i, j;
        int err;

        if (esw_check_vport_match_metadata_supported(esw))
                esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

        mlx5_esw_for_all_vports(esw, i, vport) {
                err = esw_vport_ingress_common_config(esw, vport);
                if (err)
                        goto err_ingress;

                if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
                        err = esw_vport_egress_prio_tag_config(esw, vport);
                        if (err)
                                goto err_egress;
                }
        }

        if (mlx5_eswitch_vport_match_metadata_enabled(esw))
                esw_info(esw->dev, "Use metadata reg_c as source vport to match\n");

        return 0;

err_egress:
        esw_vport_disable_ingress_acl(esw, vport);
err_ingress:
        for (j = MLX5_VPORT_PF; j < i; j++) {
                vport = &esw->vports[j];
                esw_vport_disable_egress_acl(esw, vport);
                esw_vport_disable_ingress_acl(esw, vport);
        }

        vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
        err = esw_vport_create_offloads_acl_tables(esw, vport);
        if (err)
                esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
        return err;
}

static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
        struct mlx5_vport *vport;
        int i;

        mlx5_esw_for_all_vports(esw, i, vport) {
                esw_vport_disable_egress_acl(esw, vport);
                esw_vport_disable_ingress_acl(esw, vport);
        }

        vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
        esw_vport_destroy_offloads_acl_tables(esw, vport);
        esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}

@@ -2045,7 +2065,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
        memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
        mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);

        err = esw_create_offloads_acl_tables(esw);
        err = esw_create_uplink_offloads_acl_tables(esw);
        if (err)
                return err;

@@ -2070,7 +2090,7 @@ create_ft_err:
        esw_destroy_offloads_fdb_tables(esw);

create_fdb_err:
        esw_destroy_offloads_acl_tables(esw);
        esw_destroy_uplink_offloads_acl_tables(esw);

        return err;
}

@@ -2080,7 +2100,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
        esw_destroy_vport_rx_group(esw);
        esw_destroy_offloads_table(esw);
        esw_destroy_offloads_fdb_tables(esw);
        esw_destroy_offloads_acl_tables(esw);
        esw_destroy_uplink_offloads_acl_tables(esw);
}

static void

@@ -2169,7 +2189,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
        if (err)
                goto err_vport_metadata;

        mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
        err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
        if (err)
                goto err_vports;

        err = esw_offloads_load_all_reps(esw);
        if (err)

@@ -2182,6 +2204,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)

err_reps:
        mlx5_eswitch_disable_pf_vf_vports(esw);
err_vports:
        esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
        esw_offloads_steering_cleanup(esw);

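esw_vport_ingress_config() replaces the old catch-all `out:` label with a chain of labels named for the step that failed; control falls through them so each label undoes exactly the steps that had already succeeded (metadata rule, then group, then table). A compact runnable illustration of the label-chain pattern; every function name here is made up::

    #include <errno.h>

    static int create_table(void)   { return 0; }
    static int create_group(void)   { return 0; }
    static int add_metadata(void)   { return 0; }
    static int add_prio_tag(void)   { return -EINVAL; }  /* force the unwind */
    static void del_metadata(void)  { }
    static void destroy_group(void) { }
    static void destroy_table(void) { }

    static int ingress_config(void)
    {
            int err;

            err = create_table();
            if (err)
                    return err;
            err = create_group();
            if (err)
                    goto group_err;
            err = add_metadata();
            if (err)
                    goto metadata_err;
            err = add_prio_tag();
            if (err)
                    goto prio_tag_err;
            return 0;

    prio_tag_err:
            del_metadata();         /* undo the metadata step */
    metadata_err:
            destroy_group();        /* undo the group step */
    group_err:
            destroy_table();        /* undo the table step */
            return err;
    }

    int main(void)
    {
            ingress_config();
            return 0;
    }
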
@@ -2400,9 +2400,17 @@ static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
        int acc_level_ns = acc_level;

        prio->start_level = acc_level;
        fs_for_each_ns(ns, prio)
        fs_for_each_ns(ns, prio) {
                /* This updates start_level and num_levels of ns's priority descendants */
                acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);

                /* If this is a prio with chains, we can jump from one chain
                 * (namespace) to another, so we accumulate the levels
                 */
                if (prio->node.type == FS_TYPE_PRIO_CHAINS)
                        acc_level = acc_level_ns;
        }

        if (!prio->num_levels)
                prio->num_levels = acc_level_ns - prio->start_level;
        WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);

@@ -2591,58 +2599,109 @@ out_err:
        steering->rdma_rx_root_ns = NULL;
        return err;
}
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)

/* FT and tc chains are stored in the same array so we can re-use the
 * mlx5_get_fdb_sub_ns() and tc api for FT chains.
 * When creating a new ns for each chain store it in the first available slot.
 * Assume tc chains are created and stored first and only then the FT chain.
 */
static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
                                        struct mlx5_flow_namespace *ns)
{
        int chain = 0;

        while (steering->fdb_sub_ns[chain])
                ++chain;

        steering->fdb_sub_ns[chain] = ns;
}

static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
                                        struct fs_prio *maj_prio)
{
        struct mlx5_flow_namespace *ns;
        struct fs_prio *maj_prio;
        struct fs_prio *min_prio;
        int prio;

        ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
        if (IS_ERR(ns))
                return PTR_ERR(ns);

        for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
                min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
                if (IS_ERR(min_prio))
                        return PTR_ERR(min_prio);
        }

        store_fdb_sub_ns_prio_chain(steering, ns);

        return 0;
}

static int create_fdb_chains(struct mlx5_flow_steering *steering,
                             int fs_prio,
                             int chains)
{
        struct fs_prio *maj_prio;
        int levels;
        int chain;
        int prio;
        int err;

        levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
        maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
                                          fs_prio,
                                          levels);
        if (IS_ERR(maj_prio))
                return PTR_ERR(maj_prio);

        for (chain = 0; chain < chains; chain++) {
                err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
                if (err)
                        return err;
        }

        return 0;
}

static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
{
        int err;

        steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
                                       sizeof(*steering->fdb_sub_ns),
                                       GFP_KERNEL);
        if (!steering->fdb_sub_ns)
                return -ENOMEM;

        err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
        if (err)
                return err;

        err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
        if (err)
                return err;

        return 0;
}

static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
        struct fs_prio *maj_prio;
        int err;

        steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
        if (!steering->fdb_root_ns)
                return -ENOMEM;

        steering->fdb_sub_ns = kzalloc(sizeof(steering->fdb_sub_ns) *
                                       (FDB_MAX_CHAIN + 1), GFP_KERNEL);
        if (!steering->fdb_sub_ns)
                return -ENOMEM;

        maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
                                  1);
        if (IS_ERR(maj_prio)) {
                err = PTR_ERR(maj_prio);
                goto out_err;
        }

        levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1);
        maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
                                          FDB_FAST_PATH,
                                          levels);
        if (IS_ERR(maj_prio)) {
                err = PTR_ERR(maj_prio);
        err = create_fdb_fast_path(steering);
        if (err)
                goto out_err;
        }

        for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) {
                ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
                if (IS_ERR(ns)) {
                        err = PTR_ERR(ns);
                        goto out_err;
                }

                for (prio = 0; prio < FDB_MAX_PRIO * (chain + 1); prio++) {
                        min_prio = fs_create_prio(ns, prio, 2);
                        if (IS_ERR(min_prio)) {
                                err = PTR_ERR(min_prio);
                                goto out_err;
                        }
                }

                steering->fdb_sub_ns[chain] = ns;
        }

        maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
        if (IS_ERR(maj_prio)) {

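create_fdb_chains() sizes one chained major prio to cover every level of every chain it creates: levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains. With the constants from eswitch.h, the TC call above (FDB_TC_MAX_CHAIN + 1 = 4 chains) reserves 2 * 16 * 4 = 128 levels and the FT call (1 chain) reserves 32. A quick arithmetic check::

    #include <stdio.h>

    #define FDB_TC_MAX_CHAIN 3
    #define FDB_TC_MAX_PRIO 16
    #define FDB_TC_LEVELS_PER_PRIO 2

    static int levels_for(int chains)
    {
            return FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
    }

    int main(void)
    {
            printf("tc chains: %d levels\n", levels_for(FDB_TC_MAX_CHAIN + 1)); /* 128 */
            printf("ft chain:  %d levels\n", levels_for(1));                    /* 32 */
            return 0;
    }
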
@@ -84,4 +84,9 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
                               void *key, u32 sz_bytes, u32 *p_key_id);
void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);

static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
{
        return devlink_net(priv_to_devlink(dev));
}

#endif

@@ -1168,7 +1168,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
        mlx5_put_uars_page(dev, dev->priv.uar);
}

static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
{
        int err = 0;

@@ -1226,7 +1226,7 @@ function_teardown:
        return err;
}

static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
{
        if (cleanup) {
                mlx5_unregister_device(dev);

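Dropping `static` from mlx5_load_one()/mlx5_unload_one() is what lets the new devlink reload support drive a full teardown and re-init of the device. The reload callbacks plausibly end up wired along these lines; the callback names and exact signatures below are a reconstruction for illustration, not quoted from the patch::

    static int mlx5_devlink_reload_down(struct devlink *devlink,
                                        struct netlink_ext_ack *extack)
    {
            struct mlx5_core_dev *dev = devlink_priv(devlink);

            /* cleanup=false: tear down HW state but keep the device registered */
            return mlx5_unload_one(dev, false);
    }

    static int mlx5_devlink_reload_up(struct devlink *devlink,
                                      struct netlink_ext_ack *extack)
    {
            struct mlx5_core_dev *dev = devlink_priv(devlink);

            /* boot=false: this is a re-init, not the first probe */
            return mlx5_load_one(dev, false);
    }
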
@@ -243,4 +243,7 @@ enum {

u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);

int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup);
int mlx5_load_one(struct mlx5_core_dev *dev, bool boot);
#endif /* __MLX5_CORE_H__ */

@@ -108,10 +108,10 @@ enable_vfs_hca:
        return 0;
}

static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev, bool clear_vf)
static void
mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
{
        struct mlx5_core_sriov *sriov = &dev->priv.sriov;
        int num_vfs = pci_num_vf(dev->pdev);
        int err;
        int vf;

@@ -147,7 +147,7 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
        err = pci_enable_sriov(pdev, num_vfs);
        if (err) {
                mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
                mlx5_device_disable_sriov(dev, true);
                mlx5_device_disable_sriov(dev, num_vfs, true);
        }
        return err;
}

@@ -155,9 +155,10 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
static void mlx5_sriov_disable(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        int num_vfs = pci_num_vf(dev->pdev);

        pci_disable_sriov(pdev);
        mlx5_device_disable_sriov(dev, true);
        mlx5_device_disable_sriov(dev, num_vfs, true);
}

int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)

@@ -192,7 +193,7 @@ void mlx5_sriov_detach(struct mlx5_core_dev *dev)
        if (!mlx5_core_is_pf(dev))
                return;

        mlx5_device_disable_sriov(dev, false);
        mlx5_device_disable_sriov(dev, pci_num_vf(dev->pdev), false);
}

static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)

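The extra num_vfs parameter exists because of ordering: once pci_disable_sriov() has run, pci_num_vf() reports zero, so mlx5_sriov_disable() must read the count first and hand it down for the per-VF cleanup. A toy model of that cache-before-teardown ordering (all names invented)::

    #include <stdio.h>

    static int hw_vfs = 4;                          /* stands in for PCI state */
    static int query_vfs(void)  { return hw_vfs; }  /* like pci_num_vf() */
    static void disable_hw(void) { hw_vfs = 0; }    /* like pci_disable_sriov() */

    int main(void)
    {
            int num_vfs = query_vfs();      /* cache before teardown... */

            disable_hw();
            /* ...because querying afterwards reports 0 and a per-VF
             * cleanup loop bounded by it would do nothing. */
            printf("cleaning up %d VFs (query now says %d)\n",
                   num_vfs, query_vfs());
            return 0;
    }
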
@@ -154,7 +154,7 @@ int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
        nic_matcher->num_of_builders =
                nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv];

        if (!nic_matcher->ste_builder) {
        if (!nic_matcher->num_of_builders) {
                mlx5dr_dbg(matcher->tbl->dmn,
                           "Rule not supported on this matcher due to IP related fields\n");
                return -EINVAL;

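The fix tests the builder count that was just selected from the per-IP-version array, rather than a pointer that may be stale. Illustrative only: the table below models that shape of check, and all names and counts are made up::

    #include <errno.h>

    /* A builder count table keyed by outer/inner IP version; a zero entry
     * marks an unsupported combination, mirroring the corrected check. */
    static const int num_of_builders_arr[2][2] = {
            { 4, 3 },
            { 3, 0 },
    };

    static int select_builders(int outer_ipv, int inner_ipv)
    {
            int n = num_of_builders_arr[outer_ipv][inner_ipv];

            return n ? n : -EINVAL;  /* test the count, not a stale pointer */
    }

    int main(void)
    {
            return select_builders(1, 1) == -EINVAL ? 0 : 1;
    }
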
@@ -1121,6 +1121,11 @@ static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev)
        return dev->coredev_type == MLX5_COREDEV_PF;
}

static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
{
        return dev->coredev_type == MLX5_COREDEV_VF;
}

static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
{
        return dev->caps.embedded_cpu;

@@ -1186,4 +1191,15 @@ enum {
        MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};

static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
{
        struct devlink *devlink = priv_to_devlink(dev);
        union devlink_param_value val;

        devlink_param_driverinit_value_get(devlink,
                                           DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
                                           &val);
        return val.vbool;
}

#endif /* MLX5_DRIVER_H */

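mlx5_is_roce_enabled() reads the driverinit copy of the enable_roce parameter, so a `devlink dev param set` only changes what the helper returns after the next devlink reload. A toy model of that staging behaviour; every name below is invented and this is not the devlink implementation::

    #include <stdbool.h>
    #include <stdio.h>

    /* A driverinit-cmode parameter: writes land in a staged copy and only
     * become the driver's view when the device is reloaded. */
    static bool staged_enable_roce = true;   /* default: enabled */
    static bool running_enable_roce = true;

    static void param_set(bool v) { staged_enable_roce = v; }
    static void reload(void)      { running_enable_roce = staged_enable_roce; }
    static bool is_roce_enabled(void) { return running_enable_roce; }

    int main(void)
    {
            param_set(false);                       /* devlink dev param set ... */
            printf("before reload: %d\n", is_roce_enabled());   /* still 1 */
            reload();                               /* devlink dev reload */
            printf("after reload:  %d\n", is_roce_enabled());   /* now 0 */
            return 0;
    }
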
@@ -80,7 +80,8 @@ enum mlx5_flow_namespace_type {

enum {
        FDB_BYPASS_PATH,
        FDB_FAST_PATH,
        FDB_TC_OFFLOAD,
        FDB_FT_OFFLOAD,
        FDB_SLOW_PATH,
};

@@ -1153,7 +1153,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8      log_max_srq[0x5];
        u8      reserved_at_b0[0x10];

        u8      reserved_at_c0[0x8];
        u8      max_sgl_for_optimized_performance[0x8];
        u8      log_max_cq_sz[0x8];
        u8      reserved_at_d0[0xb];
        u8      log_max_cq[0x5];

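In mlx5_ifc.h the `[N]` on each `u8` member is the field width in bits, not bytes, and the `reserved_at_<off>` names carry the hex bit offset, so the new 8-bit field starts at bit 0xc8. Fields in these layouts are read through the MLX5_GET/MLX5_CAP helpers rather than by direct member access. A user-space sketch of the same bit-offset bookkeeping; the extractor and field macros below are invented for illustration::

    #include <stdint.h>
    #include <stdio.h>

    #define FLD_MAX_SGL_OFF  0xc8   /* bit offset inside the capability blob */
    #define FLD_MAX_SGL_BITS 0x8    /* width in bits */

    /* MSB-first bit extractor: the idea behind MLX5_GET-style accessors. */
    static unsigned int get_bits(const uint8_t *buf, unsigned int off,
                                 unsigned int bits)
    {
            unsigned int val = 0, i;

            for (i = 0; i < bits; i++) {
                    unsigned int b = off + i;

                    val = (val << 1) | ((buf[b / 8] >> (7 - b % 8)) & 1);
            }
            return val;
    }

    int main(void)
    {
            uint8_t caps[0x20] = {0};

            caps[0xc8 / 8] = 30;    /* the 8-bit field at bit 0xc8 is byte 25 */
            printf("max_sgl: %u\n",
                   get_bits(caps, FLD_MAX_SGL_OFF, FLD_MAX_SGL_BITS));
            return 0;
    }
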
@@ -402,6 +402,7 @@ enum devlink_param_generic_id {
        DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
        DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
        DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE,
        DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,

        /* add new param generic ids above here */
        __DEVLINK_PARAM_GENERIC_ID_MAX,

@@ -436,6 +437,9 @@ enum devlink_param_generic_id {
        "reset_dev_on_drv_probe"
#define DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE DEVLINK_PARAM_TYPE_U8

#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME "enable_roce"
#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE DEVLINK_PARAM_TYPE_BOOL

#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate)      \
{                                                                       \
        .id = DEVLINK_PARAM_GENERIC_ID_##_id,                           \

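With the name/type macros in place, a driver opts into the generic parameter through the DEVLINK_PARAM_GENERIC() initializer shown above and registers the array with devlink_params_register(). A sketch of the likely usage; the array name is invented, and driverinit is chosen as the only cmode to match the documentation earlier in this series::

    /* Illustrative registration; NULL get/set/validate callbacks mean the
     * devlink core simply stores and returns the driverinit value. */
    static const struct devlink_param driver_params[] = {
            DEVLINK_PARAM_GENERIC(ENABLE_ROCE,
                                  BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
                                  NULL, NULL, NULL),
    };
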
@@ -3006,6 +3006,11 @@ static const struct devlink_param devlink_param_generic[] = {
                .name = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_NAME,
                .type = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE,
        },
        {
                .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
                .name = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME,
                .type = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE,
        },
};

static int devlink_param_generic_verify(const struct devlink_param *param)