Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux
Misc updates from mlx5-next branch:

1) Add the required HW definitions and structures for upcoming TLS support.
2) Add support for MCQI and MCQS hardware registers for fw version query.
3) Add hardware bits and structures definitions for sub-functions.
4) Small code cleanup and improvement for the PF pci driver.
5) Bluefield (ECPF) updates and refactoring for better E-Switch management
   on the ECPF embedded CPU NIC:
   5.1) Consolidate querying eswitch number of VFs.
   5.2) Register event handler at the correct E-Switch init stage.
   5.3) Setup the PF's inline mode and vlan pop when the ECPF is the
        E-Switch manager (the host PF is basically a VF).
   5.4) Handle vport UC address changes in switchdev mode.
6) Cleanup the rep and netdev reference when unloading IB rep.

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
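The E-Switch items above (notably 5.1) rework mlx5_esw_query_functions() so that it allocates and returns its own output buffer instead of filling a caller-provided one; the eswitch hunks below show the new kernel-doc and callers. A minimal caller sketch in that style, assuming the usual mlx5 driver headers; example_query_num_vfs() is an illustrative helper, not part of this merge:

/* Illustrative only: read the host VF count with the reworked API. */
static int example_query_num_vfs(struct mlx5_core_dev *dev, u16 *num_vfs)
{
        const u32 *out;

        out = mlx5_esw_query_functions(dev);
        if (IS_ERR(out))
                return PTR_ERR(out);

        *num_vfs = MLX5_GET(query_esw_functions_out, out,
                            host_params_context.host_num_of_vfs);
        kvfree(out); /* caller owns and frees the returned buffer */
        return 0;
}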
commit e08a976a16
@@ -37,7 +37,7 @@
 #include "mlx5_ib.h"
 #include "srq.h"
 
-static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
+static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
 {
 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
 
@@ -891,6 +891,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 int entries = attr->cqe;
 int vector = attr->comp_vector;
 struct mlx5_ib_dev *dev = to_mdev(ibdev);
+u32 out[MLX5_ST_SZ_DW(create_cq_out)];
 struct mlx5_ib_cq *cq;
 int uninitialized_var(index);
 int uninitialized_var(inlen);
@@ -958,7 +959,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
 MLX5_SET(cqc, cqc, oi, 1);
 
-err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
+err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
 if (err)
 goto err_cqb;
 
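The two cq.c hunks above (and matching net driver hunks later in this diff) change mlx5_core_create_cq() to take a caller-supplied command output buffer and its length. A minimal caller sketch mirroring that pattern, assuming the usual mlx5 driver headers; example_create_cq() is an illustrative wrapper, not part of this merge:

/* Illustrative only: the new mlx5_core_create_cq() calling convention. */
static int example_create_cq(struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq,
                             u32 *in, int inlen)
{
        u32 out[MLX5_ST_SZ_DW(create_cq_out)];

        /* The core function clears and fills the caller's output buffer. */
        return mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
}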
@@ -14,9 +14,10 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 int vport_index;
 
 ibdev = mlx5_ib_get_uplink_ibdev(dev->priv.eswitch);
-vport_index = ibdev->free_port++;
+vport_index = rep->vport_index;
 
 ibdev->port[vport_index].rep = rep;
+rep->rep_data[REP_IB].priv = ibdev;
 write_lock(&ibdev->port[vport_index].roce.netdev_lock);
 ibdev->port[vport_index].roce.netdev =
 mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
@@ -28,7 +29,7 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 static int
 mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
-int num_ports = MLX5_TOTAL_VPORTS(dev);
+int num_ports = mlx5_eswitch_get_total_vports(dev);
 const struct mlx5_ib_profile *profile;
 struct mlx5_ib_dev *ibdev;
 int vport_index;
@@ -50,7 +51,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 }
 
 ibdev->is_rep = true;
-vport_index = ibdev->free_port++;
+vport_index = rep->vport_index;
 ibdev->port[vport_index].rep = rep;
 ibdev->port[vport_index].roce.netdev =
 mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
@@ -68,15 +69,18 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 static void
 mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 {
-struct mlx5_ib_dev *dev;
+struct mlx5_ib_dev *dev = mlx5_ib_rep_to_dev(rep);
+struct mlx5_ib_port *port;
 
-if (!rep->rep_data[REP_IB].priv ||
-rep->vport != MLX5_VPORT_UPLINK)
-return;
-
-dev = mlx5_ib_rep_to_dev(rep);
-__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+port = &dev->port[rep->vport_index];
+write_lock(&port->roce.netdev_lock);
+port->roce.netdev = NULL;
+write_unlock(&port->roce.netdev_lock);
 rep->rep_data[REP_IB].priv = NULL;
+port->rep = NULL;
+
+if (rep->vport == MLX5_VPORT_UPLINK)
+__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
 }
 
 static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
@@ -28,7 +28,7 @@ struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
 #else /* CONFIG_MLX5_ESWITCH */
 static inline u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
 {
-return SRIOV_NONE;
+return MLX5_ESWITCH_NONE;
 }
 
 static inline
@@ -4492,7 +4492,7 @@ static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
 * lock/unlock above locks Now need to arm all involved CQs.
 */
 list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
-mcq->comp(mcq);
+mcq->comp(mcq, NULL);
 }
 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
 }
@@ -6814,7 +6814,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 printk_once(KERN_INFO "%s", mlx5_version);
 
 if (MLX5_ESWITCH_MANAGER(mdev) &&
-mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
+mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
 if (!mlx5_core_mp_enabled(mdev))
 mlx5_ib_register_vport_reps(mdev);
 return mdev;
@@ -978,7 +978,6 @@ struct mlx5_ib_dev {
 u16 devx_whitelist_uid;
 struct mlx5_srq_table srq_table;
 struct mlx5_async_ctx async_ctx;
-int free_port;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1558,9 +1558,9 @@ mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
 param = (struct mlx5_eq_param) {
 .irq_index = 0,
-.mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
 .nent = MLX5_IB_NUM_PF_EQE,
 };
+param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
 eq->core = mlx5_eq_create_generic(dev->mdev, &param);
 if (IS_ERR(eq->core)) {
 err = PTR_ERR(eq->core);
@@ -6297,7 +6297,7 @@ static void handle_drain_completion(struct ib_cq *cq,
 /* Run the CQ handler - this makes sure that the drain WR will
 * be processed if wasn't processed yet.
 */
-mcq->mcq.comp(&mcq->mcq);
+mcq->mcq.comp(&mcq->mcq, NULL);
 }
 
 wait_for_completion(&sdrain->done);
@ -58,7 +58,7 @@ void mlx5_cq_tasklet_cb(unsigned long data)
|
|||
list_for_each_entry_safe(mcq, temp, &ctx->process_list,
|
||||
tasklet_ctx.list) {
|
||||
list_del_init(&mcq->tasklet_ctx.list);
|
||||
mcq->tasklet_ctx.comp(mcq);
|
||||
mcq->tasklet_ctx.comp(mcq, NULL);
|
||||
mlx5_cq_put(mcq);
|
||||
if (time_after(jiffies, end))
|
||||
break;
|
||||
|
@ -68,7 +68,8 @@ void mlx5_cq_tasklet_cb(unsigned long data)
|
|||
tasklet_schedule(&ctx->task);
|
||||
}
|
||||
|
||||
static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
|
||||
static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
|
||||
struct mlx5_eqe *eqe)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
|
||||
|
@ -87,11 +88,10 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
|
|||
}
|
||||
|
||||
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
|
||||
u32 *in, int inlen)
|
||||
u32 *in, int inlen, u32 *out, int outlen)
|
||||
{
|
||||
int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn);
|
||||
u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
|
||||
u32 out[MLX5_ST_SZ_DW(create_cq_out)];
|
||||
u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
|
||||
struct mlx5_eq_comp *eq;
|
||||
int err;
|
||||
|
@ -100,9 +100,9 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
|
|||
if (IS_ERR(eq))
|
||||
return PTR_ERR(eq);
|
||||
|
||||
memset(out, 0, sizeof(out));
|
||||
memset(out, 0, outlen);
|
||||
MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
|
||||
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
|
||||
err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
|
@ -158,13 +158,8 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
|
|||
u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
|
||||
int err;
|
||||
|
||||
err = mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = mlx5_eq_del_cq(&cq->eq->core, cq);
|
||||
if (err)
|
||||
return err;
|
||||
mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
|
||||
mlx5_eq_del_cq(&cq->eq->core, cq);
|
||||
|
||||
MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
|
||||
MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
|
||||
|
|
|
@ -311,13 +311,20 @@ static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
|
|||
/* Must be called with intf_mutex held */
|
||||
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
|
||||
{
|
||||
u32 pci_id = mlx5_gen_pci_id(dev);
|
||||
struct mlx5_core_dev *res = NULL;
|
||||
struct mlx5_core_dev *tmp_dev;
|
||||
struct mlx5_priv *priv;
|
||||
u32 pci_id;
|
||||
|
||||
if (!mlx5_core_is_pf(dev))
|
||||
return NULL;
|
||||
|
||||
pci_id = mlx5_gen_pci_id(dev);
|
||||
list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
|
||||
tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
|
||||
if (!mlx5_core_is_pf(tmp_dev))
|
||||
continue;
|
||||
|
||||
if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
|
||||
res = tmp_dev;
|
||||
break;
|
||||
|
|
|
@ -782,7 +782,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
|
|||
struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
|
||||
|
||||
void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
|
||||
void mlx5e_completion_event(struct mlx5_core_cq *mcq);
|
||||
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
|
||||
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
|
||||
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
|
||||
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
|
||||
|
|
|
@ -680,7 +680,7 @@ static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
|
|||
|
||||
memset(perm_addr, 0xff, MAX_ADDR_LEN);
|
||||
|
||||
mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr);
|
||||
mlx5_query_mac_address(priv->mdev, perm_addr);
|
||||
}
|
||||
|
||||
static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
|
||||
|
|
|
@ -1516,6 +1516,7 @@ static void mlx5e_free_cq(struct mlx5e_cq *cq)
|
|||
|
||||
static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
|
||||
{
|
||||
u32 out[MLX5_ST_SZ_DW(create_cq_out)];
|
||||
struct mlx5_core_dev *mdev = cq->mdev;
|
||||
struct mlx5_core_cq *mcq = &cq->mcq;
|
||||
|
||||
|
@ -1550,7 +1551,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
|
|||
MLX5_ADAPTER_PAGE_SHIFT);
|
||||
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
|
||||
|
||||
err = mlx5_core_create_cq(mdev, mcq, in, inlen);
|
||||
err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
|
||||
|
||||
kvfree(in);
|
||||
|
||||
|
@ -4588,7 +4589,7 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
|
|||
{
|
||||
struct mlx5e_priv *priv = netdev_priv(netdev);
|
||||
|
||||
mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
|
||||
mlx5_query_mac_address(priv->mdev, netdev->dev_addr);
|
||||
if (is_zero_ether_addr(netdev->dev_addr) &&
|
||||
!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
|
||||
eth_hw_addr_random(netdev);
|
||||
|
@ -5140,7 +5141,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
|
|||
|
||||
#ifdef CONFIG_MLX5_ESWITCH
|
||||
if (MLX5_ESWITCH_MANAGER(mdev) &&
|
||||
mlx5_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
|
||||
mlx5_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
|
||||
mlx5e_rep_register_vport_reps(mdev);
|
||||
return mdev;
|
||||
}
|
||||
|
|
|
@ -398,7 +398,7 @@ static int mlx5e_rep_get_port_parent_id(struct net_device *dev,
|
|||
priv = netdev_priv(dev);
|
||||
esw = priv->mdev->priv.eswitch;
|
||||
|
||||
if (esw->mode == SRIOV_NONE)
|
||||
if (esw->mode == MLX5_ESWITCH_NONE)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
|
||||
|
@ -414,7 +414,7 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
|
|||
struct mlx5e_rep_sq *rep_sq, *tmp;
|
||||
struct mlx5e_rep_priv *rpriv;
|
||||
|
||||
if (esw->mode != SRIOV_OFFLOADS)
|
||||
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
|
||||
return;
|
||||
|
||||
rpriv = mlx5e_rep_to_rep_priv(rep);
|
||||
|
@ -435,7 +435,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
|
|||
int err;
|
||||
int i;
|
||||
|
||||
if (esw->mode != SRIOV_OFFLOADS)
|
||||
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
|
||||
return 0;
|
||||
|
||||
rpriv = mlx5e_rep_to_rep_priv(rep);
|
||||
|
@ -1392,7 +1392,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
|
|||
SET_NETDEV_DEV(netdev, mdev->device);
|
||||
netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
|
||||
/* we want a persistent mac for the uplink rep */
|
||||
mlx5_query_nic_vport_mac_address(mdev, 0, netdev->dev_addr);
|
||||
mlx5_query_mac_address(mdev, netdev->dev_addr);
|
||||
netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
|
||||
#ifdef CONFIG_MLX5_CORE_EN_DCB
|
||||
if (MLX5_CAP_GEN(mdev, qos))
|
||||
|
|
|
@ -3372,7 +3372,7 @@ mlx5e_tc_add_flow(struct mlx5e_priv *priv,
|
|||
if (!tc_can_offload_extack(priv->netdev, f->common.extack))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (esw && esw->mode == SRIOV_OFFLOADS)
|
||||
if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
|
||||
err = mlx5e_add_fdb_flow(priv, f, flow_flags,
|
||||
filter_dev, flow);
|
||||
else
|
||||
|
|
|
@ -134,7 +134,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
|
|||
return work_done;
|
||||
}
|
||||
|
||||
void mlx5e_completion_event(struct mlx5_core_cq *mcq)
|
||||
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
|
||||
{
|
||||
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
|
||||
|
||||
|
|
|
@ -153,7 +153,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
|
|||
cq = mlx5_eq_cq_get(eq, cqn);
|
||||
if (likely(cq)) {
|
||||
++cq->arm_sn;
|
||||
cq->comp(cq);
|
||||
cq->comp(cq, eqe);
|
||||
mlx5_cq_put(cq);
|
||||
} else {
|
||||
mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
|
||||
|
@ -256,6 +256,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
|
|||
int inlen;
|
||||
u32 *in;
|
||||
int err;
|
||||
int i;
|
||||
|
||||
/* Init CQ table */
|
||||
memset(cq_table, 0, sizeof(*cq_table));
|
||||
|
@ -283,10 +284,12 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
|
|||
mlx5_fill_page_array(&eq->buf, pas);
|
||||
|
||||
MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
|
||||
if (!param->mask && MLX5_CAP_GEN(dev, log_max_uctx))
|
||||
if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
|
||||
MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);
|
||||
|
||||
MLX5_SET64(create_eq_in, in, event_bitmask, param->mask);
|
||||
for (i = 0; i < 4; i++)
|
||||
MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i,
|
||||
param->mask[i]);
|
||||
|
||||
eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
|
||||
MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
|
||||
|
@ -389,7 +392,7 @@ int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
|
|||
return err;
|
||||
}
|
||||
|
||||
int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
|
||||
void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
|
||||
{
|
||||
struct mlx5_cq_table *table = &eq->cq_table;
|
||||
struct mlx5_core_cq *tmp;
|
||||
|
@ -399,16 +402,14 @@ int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
|
|||
spin_unlock(&table->lock);
|
||||
|
||||
if (!tmp) {
|
||||
mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", eq->eqn, cq->cqn);
|
||||
return -ENOENT;
|
||||
mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
|
||||
eq->eqn, cq->cqn);
|
||||
return;
|
||||
}
|
||||
|
||||
if (tmp != cq) {
|
||||
mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", eq->eqn, cq->cqn);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
if (tmp != cq)
|
||||
mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
|
||||
eq->eqn, cq->cqn);
|
||||
}
|
||||
|
||||
int mlx5_eq_table_init(struct mlx5_core_dev *dev)
|
||||
|
@ -502,14 +503,31 @@ static int cq_err_event_notifier(struct notifier_block *nb,
|
|||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
cq->event(cq, type);
|
||||
if (cq->event)
|
||||
cq->event(cq, type);
|
||||
|
||||
mlx5_cq_put(cq);
|
||||
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
|
||||
static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4])
|
||||
{
|
||||
__be64 *user_unaffiliated_events;
|
||||
__be64 *user_affiliated_events;
|
||||
int i;
|
||||
|
||||
user_affiliated_events =
|
||||
MLX5_CAP_DEV_EVENT(dev, user_affiliated_events);
|
||||
user_unaffiliated_events =
|
||||
MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events);
|
||||
|
||||
for (i = 0; i < 4; i++)
|
||||
mask[i] |= be64_to_cpu(user_affiliated_events[i] |
|
||||
user_unaffiliated_events[i]);
|
||||
}
|
||||
|
||||
static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
|
||||
{
|
||||
u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
|
||||
|
||||
|
@ -546,7 +564,10 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
|
|||
async_event_mask |=
|
||||
(1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);
|
||||
|
||||
return async_event_mask;
|
||||
mask[0] = async_event_mask;
|
||||
|
||||
if (MLX5_CAP_GEN(dev, event_cap))
|
||||
gather_user_async_events(dev, mask);
|
||||
}
|
||||
|
||||
static int create_async_eqs(struct mlx5_core_dev *dev)
|
||||
|
@ -561,9 +582,10 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
|
|||
table->cmd_eq.irq_nb.notifier_call = mlx5_eq_async_int;
|
||||
param = (struct mlx5_eq_param) {
|
||||
.irq_index = 0,
|
||||
.mask = 1ull << MLX5_EVENT_TYPE_CMD,
|
||||
.nent = MLX5_NUM_CMD_EQE,
|
||||
};
|
||||
|
||||
param.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD;
|
||||
err = create_async_eq(dev, &table->cmd_eq.core, ¶m);
|
||||
if (err) {
|
||||
mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
|
||||
|
@ -579,9 +601,10 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
|
|||
table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int;
|
||||
param = (struct mlx5_eq_param) {
|
||||
.irq_index = 0,
|
||||
.mask = gather_async_events_mask(dev),
|
||||
.nent = MLX5_NUM_ASYNC_EQE,
|
||||
};
|
||||
|
||||
gather_async_events_mask(dev, param.mask);
|
||||
err = create_async_eq(dev, &table->async_eq.core, ¶m);
|
||||
if (err) {
|
||||
mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
|
||||
|
@ -597,9 +620,10 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
|
|||
table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int;
|
||||
param = (struct mlx5_eq_param) {
|
||||
.irq_index = 0,
|
||||
.mask = 1 << MLX5_EVENT_TYPE_PAGE_REQUEST,
|
||||
.nent = /* TODO: sriov max_vf + */ 1,
|
||||
};
|
||||
|
||||
param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST;
|
||||
err = create_async_eq(dev, &table->pages_eq.core, ¶m);
|
||||
if (err) {
|
||||
mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
|
||||
|
@ -791,7 +815,6 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
|
|||
eq->irq_nb.notifier_call = mlx5_eq_comp_int;
|
||||
param = (struct mlx5_eq_param) {
|
||||
.irq_index = vecidx,
|
||||
.mask = 0,
|
||||
.nent = nent,
|
||||
};
|
||||
err = create_map_eq(dev, &eq->core, ¶m);
|
||||
|
@ -927,6 +950,7 @@ int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
|
|||
|
||||
return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_eq_notifier_register);
|
||||
|
||||
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
|
||||
{
|
||||
|
@ -937,3 +961,4 @@ int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
|
|||
|
||||
return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
|
||||
|
|
|
@ -497,7 +497,7 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
|
|||
|
||||
fdb_add:
|
||||
/* SRIOV is enabled: Forward UC MAC to vport */
|
||||
if (esw->fdb_table.legacy.fdb && esw->mode == SRIOV_LEGACY)
|
||||
if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
|
||||
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
|
||||
|
||||
esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
|
||||
|
@ -897,7 +897,7 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
|
|||
struct mlx5_eswitch *esw = dev->priv.eswitch;
|
||||
u8 mac[ETH_ALEN];
|
||||
|
||||
mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
|
||||
mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
|
||||
esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
|
||||
vport->vport, mac);
|
||||
|
||||
|
@ -1553,6 +1553,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
|
|||
struct mlx5_vport *vport)
|
||||
{
|
||||
u16 vport_num = vport->vport;
|
||||
int flags;
|
||||
|
||||
if (esw->manager_vport == vport_num)
|
||||
return;
|
||||
|
@ -1570,11 +1571,13 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
|
|||
vport->info.node_guid);
|
||||
}
|
||||
|
||||
flags = (vport->info.vlan || vport->info.qos) ?
|
||||
SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
|
||||
modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
|
||||
(vport->info.vlan || vport->info.qos));
|
||||
flags);
|
||||
|
||||
/* Only legacy mode needs ACLs */
|
||||
if (esw->mode == SRIOV_LEGACY) {
|
||||
if (esw->mode == MLX5_ESWITCH_LEGACY) {
|
||||
esw_vport_ingress_config(esw, vport);
|
||||
esw_vport_egress_config(esw, vport);
|
||||
}
|
||||
|
@ -1626,7 +1629,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
|
|||
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
|
||||
|
||||
/* Create steering drop counters for ingress and egress ACLs */
|
||||
if (vport_num && esw->mode == SRIOV_LEGACY)
|
||||
if (vport_num && esw->mode == MLX5_ESWITCH_LEGACY)
|
||||
esw_vport_create_drop_counters(vport);
|
||||
|
||||
/* Restore old vport configuration */
|
||||
|
@ -1680,7 +1683,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
|
|||
vport->enabled_events = 0;
|
||||
esw_vport_disable_qos(esw, vport);
|
||||
if (esw->manager_vport != vport_num &&
|
||||
esw->mode == SRIOV_LEGACY) {
|
||||
esw->mode == MLX5_ESWITCH_LEGACY) {
|
||||
mlx5_modify_vport_admin_state(esw->dev,
|
||||
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
|
||||
vport_num, 1,
|
||||
|
@ -1712,59 +1715,91 @@ static int eswitch_vport_event(struct notifier_block *nb,
|
|||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
int mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen)
|
||||
/**
|
||||
* mlx5_esw_query_functions - Returns raw output about functions state
|
||||
* @dev: Pointer to device to query
|
||||
*
|
||||
* mlx5_esw_query_functions() allocates and returns functions changed
|
||||
* raw output memory pointer from device on success. Otherwise returns ERR_PTR.
|
||||
* Caller must free the memory using kvfree() when valid pointer is returned.
|
||||
*/
|
||||
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
|
||||
{
|
||||
int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
|
||||
u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
|
||||
u32 *out;
|
||||
int err;
|
||||
|
||||
out = kvzalloc(outlen, GFP_KERNEL);
|
||||
if (!out)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
MLX5_SET(query_esw_functions_in, in, opcode,
|
||||
MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
|
||||
|
||||
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
|
||||
err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
|
||||
if (!err)
|
||||
return out;
|
||||
|
||||
kvfree(out);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
|
||||
{
|
||||
MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
|
||||
mlx5_eq_notifier_register(esw->dev, &esw->nb);
|
||||
|
||||
if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
|
||||
MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
|
||||
ESW_FUNCTIONS_CHANGED);
|
||||
mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
|
||||
}
|
||||
}
|
||||
|
||||
static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
|
||||
{
|
||||
if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
|
||||
mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
|
||||
|
||||
mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
|
||||
|
||||
flush_workqueue(esw->work_queue);
|
||||
}
|
||||
|
||||
/* Public E-Switch API */
|
||||
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
|
||||
|
||||
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
|
||||
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
|
||||
{
|
||||
struct mlx5_vport *vport;
|
||||
int total_nvports = 0;
|
||||
int err;
|
||||
int i, enabled_events;
|
||||
|
||||
if (!ESW_ALLOWED(esw) ||
|
||||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
|
||||
esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
|
||||
esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
|
||||
esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
|
||||
esw_warn(esw->dev, "ingress ACL is not supported by FW\n");
|
||||
|
||||
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
|
||||
esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n");
|
||||
|
||||
esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
|
||||
|
||||
if (mode == SRIOV_OFFLOADS) {
|
||||
if (mlx5_core_is_ecpf_esw_manager(esw->dev))
|
||||
total_nvports = esw->total_vports;
|
||||
else
|
||||
total_nvports = nvfs + MLX5_SPECIAL_VPORTS(esw->dev);
|
||||
}
|
||||
esw_warn(esw->dev, "engress ACL is not supported by FW\n");
|
||||
|
||||
esw->mode = mode;
|
||||
|
||||
mlx5_lag_update(esw->dev);
|
||||
|
||||
if (mode == SRIOV_LEGACY) {
|
||||
if (mode == MLX5_ESWITCH_LEGACY) {
|
||||
err = esw_create_legacy_table(esw);
|
||||
if (err)
|
||||
goto abort;
|
||||
} else {
|
||||
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
|
||||
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
||||
err = esw_offloads_init(esw, nvfs, total_nvports);
|
||||
err = esw_offloads_init(esw);
|
||||
}
|
||||
|
||||
if (err)
|
||||
|
@ -1774,11 +1809,8 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
|
|||
if (err)
|
||||
esw_warn(esw->dev, "Failed to create eswitch TSAR");
|
||||
|
||||
/* Don't enable vport events when in SRIOV_OFFLOADS mode, since:
|
||||
* 1. L2 table (MPFS) is programmed by PF/VF representors netdevs set_rx_mode
|
||||
* 2. FDB/Eswitch is programmed by user space tools
|
||||
*/
|
||||
enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : 0;
|
||||
enabled_events = (mode == MLX5_ESWITCH_LEGACY) ? SRIOV_VPORT_EVENTS :
|
||||
UC_ADDR_CHANGE;
|
||||
|
||||
/* Enable PF vport */
|
||||
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
|
||||
|
@ -1791,22 +1823,21 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
|
|||
}
|
||||
|
||||
/* Enable VF vports */
|
||||
mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs)
|
||||
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
|
||||
esw_enable_vport(esw, vport, enabled_events);
|
||||
|
||||
if (mode == SRIOV_LEGACY) {
|
||||
MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
|
||||
mlx5_eq_notifier_register(esw->dev, &esw->nb);
|
||||
}
|
||||
mlx5_eswitch_event_handlers_register(esw);
|
||||
|
||||
esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
|
||||
mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
|
||||
esw->esw_funcs.num_vfs, esw->enabled_vports);
|
||||
|
||||
esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
|
||||
esw->enabled_vports);
|
||||
return 0;
|
||||
|
||||
abort:
|
||||
esw->mode = SRIOV_NONE;
|
||||
esw->mode = MLX5_ESWITCH_NONE;
|
||||
|
||||
if (mode == SRIOV_OFFLOADS) {
|
||||
if (mode == MLX5_ESWITCH_OFFLOADS) {
|
||||
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
||||
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
|
||||
}
|
||||
|
@ -1814,23 +1845,22 @@ abort:
|
|||
return err;
|
||||
}
|
||||
|
||||
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
|
||||
void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
|
||||
{
|
||||
struct esw_mc_addr *mc_promisc;
|
||||
struct mlx5_vport *vport;
|
||||
int old_mode;
|
||||
int i;
|
||||
|
||||
if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE)
|
||||
if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
|
||||
return;
|
||||
|
||||
esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
|
||||
esw->enabled_vports, esw->mode);
|
||||
esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
|
||||
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
|
||||
esw->esw_funcs.num_vfs, esw->enabled_vports);
|
||||
|
||||
mc_promisc = &esw->mc_promisc;
|
||||
|
||||
if (esw->mode == SRIOV_LEGACY)
|
||||
mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
|
||||
mlx5_eswitch_event_handlers_unregister(esw);
|
||||
|
||||
mlx5_esw_for_all_vports(esw, i, vport)
|
||||
esw_disable_vport(esw, vport);
|
||||
|
@ -1840,17 +1870,17 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
|
|||
|
||||
esw_destroy_tsar(esw);
|
||||
|
||||
if (esw->mode == SRIOV_LEGACY)
|
||||
if (esw->mode == MLX5_ESWITCH_LEGACY)
|
||||
esw_destroy_legacy_table(esw);
|
||||
else if (esw->mode == SRIOV_OFFLOADS)
|
||||
else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
|
||||
esw_offloads_cleanup(esw);
|
||||
|
||||
old_mode = esw->mode;
|
||||
esw->mode = SRIOV_NONE;
|
||||
esw->mode = MLX5_ESWITCH_NONE;
|
||||
|
||||
mlx5_lag_update(esw->dev);
|
||||
|
||||
if (old_mode == SRIOV_OFFLOADS) {
|
||||
if (old_mode == MLX5_ESWITCH_OFFLOADS) {
|
||||
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
||||
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
|
||||
}
|
||||
|
@ -1858,14 +1888,16 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
|
|||
|
||||
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
|
||||
{
|
||||
int total_vports = MLX5_TOTAL_VPORTS(dev);
|
||||
struct mlx5_eswitch *esw;
|
||||
struct mlx5_vport *vport;
|
||||
int total_vports;
|
||||
int err, i;
|
||||
|
||||
if (!MLX5_VPORT_MANAGER(dev))
|
||||
return 0;
|
||||
|
||||
total_vports = mlx5_eswitch_get_total_vports(dev);
|
||||
|
||||
esw_info(dev,
|
||||
"Total vports %d, per vport: max uc(%d) max mc(%d)\n",
|
||||
total_vports,
|
||||
|
@ -1878,6 +1910,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
|
|||
|
||||
esw->dev = dev;
|
||||
esw->manager_vport = mlx5_eswitch_manager_vport(dev);
|
||||
esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);
|
||||
|
||||
esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
|
||||
if (!esw->work_queue) {
|
||||
|
@ -1911,7 +1944,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
|
|||
}
|
||||
|
||||
esw->enabled_vports = 0;
|
||||
esw->mode = SRIOV_NONE;
|
||||
esw->mode = MLX5_ESWITCH_NONE;
|
||||
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
|
||||
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
|
||||
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
|
||||
|
@ -1981,7 +2014,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
|
|||
|
||||
ether_addr_copy(evport->info.mac, mac);
|
||||
evport->info.node_guid = node_guid;
|
||||
if (evport->enabled && esw->mode == SRIOV_LEGACY)
|
||||
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
|
||||
err = esw_vport_ingress_config(esw, evport);
|
||||
|
||||
unlock:
|
||||
|
@ -2065,7 +2098,7 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
|
|||
|
||||
evport->info.vlan = vlan;
|
||||
evport->info.qos = qos;
|
||||
if (evport->enabled && esw->mode == SRIOV_LEGACY) {
|
||||
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
|
||||
err = esw_vport_ingress_config(esw, evport);
|
||||
if (err)
|
||||
goto unlock;
|
||||
|
@ -2107,7 +2140,7 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
|
|||
mlx5_core_warn(esw->dev,
|
||||
"Spoofchk in set while MAC is invalid, vport(%d)\n",
|
||||
evport->vport);
|
||||
if (evport->enabled && esw->mode == SRIOV_LEGACY)
|
||||
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
|
||||
err = esw_vport_ingress_config(esw, evport);
|
||||
if (err)
|
||||
evport->info.spoofchk = pschk;
|
||||
|
@ -2203,7 +2236,7 @@ int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
|
|||
return -EPERM;
|
||||
|
||||
mutex_lock(&esw->state_lock);
|
||||
if (esw->mode != SRIOV_LEGACY) {
|
||||
if (esw->mode != MLX5_ESWITCH_LEGACY) {
|
||||
err = -EOPNOTSUPP;
|
||||
goto out;
|
||||
}
|
||||
|
@ -2226,7 +2259,7 @@ int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
|
|||
return -EPERM;
|
||||
|
||||
mutex_lock(&esw->state_lock);
|
||||
if (esw->mode != SRIOV_LEGACY) {
|
||||
if (esw->mode != MLX5_ESWITCH_LEGACY) {
|
||||
err = -EOPNOTSUPP;
|
||||
goto out;
|
||||
}
|
||||
|
@ -2369,7 +2402,7 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
|
|||
u64 bytes = 0;
|
||||
int err = 0;
|
||||
|
||||
if (!vport->enabled || esw->mode != SRIOV_LEGACY)
|
||||
if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY)
|
||||
return 0;
|
||||
|
||||
if (vport->egress.drop_counter)
|
||||
|
@ -2479,7 +2512,7 @@ free_out:
|
|||
|
||||
u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
|
||||
{
|
||||
return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
|
||||
return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
|
||||
|
||||
|
@ -2496,10 +2529,10 @@ EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
|
|||
|
||||
bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
|
||||
{
|
||||
if ((dev0->priv.eswitch->mode == SRIOV_NONE &&
|
||||
dev1->priv.eswitch->mode == SRIOV_NONE) ||
|
||||
(dev0->priv.eswitch->mode == SRIOV_OFFLOADS &&
|
||||
dev1->priv.eswitch->mode == SRIOV_OFFLOADS))
|
||||
if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
|
||||
dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) ||
|
||||
(dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
|
||||
dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
|
@ -2508,6 +2541,26 @@ bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
|
|||
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
|
||||
struct mlx5_core_dev *dev1)
|
||||
{
|
||||
return (dev0->priv.eswitch->mode == SRIOV_OFFLOADS &&
|
||||
dev1->priv.eswitch->mode == SRIOV_OFFLOADS);
|
||||
return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
|
||||
dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
|
||||
}
|
||||
|
||||
void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs)
|
||||
{
|
||||
const u32 *out;
|
||||
|
||||
WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
|
||||
|
||||
if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
|
||||
esw->esw_funcs.num_vfs = num_vfs;
|
||||
return;
|
||||
}
|
||||
|
||||
out = mlx5_esw_query_functions(esw->dev);
|
||||
if (IS_ERR(out))
|
||||
return;
|
||||
|
||||
esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
|
||||
host_params_context.host_num_of_vfs);
|
||||
kvfree(out);
|
||||
}
|
||||
|
|
|
@ -229,12 +229,12 @@ struct mlx5_eswitch {
|
|||
int mode;
|
||||
int nvports;
|
||||
u16 manager_vport;
|
||||
u16 first_host_vport;
|
||||
struct mlx5_esw_functions esw_funcs;
|
||||
};
|
||||
|
||||
void esw_offloads_cleanup(struct mlx5_eswitch *esw);
|
||||
int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
|
||||
int total_nvports);
|
||||
int esw_offloads_init(struct mlx5_eswitch *esw);
|
||||
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
|
||||
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
|
||||
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
|
||||
|
@ -255,8 +255,8 @@ void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
|
|||
/* E-Switch API */
|
||||
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
|
||||
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
|
||||
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
|
||||
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
|
||||
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode);
|
||||
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
|
||||
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
|
||||
u16 vport, u8 mac[ETH_ALEN]);
|
||||
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
|
||||
|
@ -392,7 +392,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
|
|||
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
|
||||
struct netlink_ext_ack *extack);
|
||||
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
|
||||
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
|
||||
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode);
|
||||
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
|
||||
enum devlink_eswitch_encap_mode encap,
|
||||
struct netlink_ext_ack *extack);
|
||||
|
@ -425,7 +425,7 @@ bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
|
|||
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
|
||||
struct mlx5_core_dev *dev1);
|
||||
|
||||
int mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen);
|
||||
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);
|
||||
|
||||
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
|
||||
|
||||
|
@ -445,6 +445,12 @@ static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
|
|||
MLX5_VPORT_ECPF : MLX5_VPORT_PF;
|
||||
}
|
||||
|
||||
static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
|
||||
{
|
||||
return mlx5_core_is_ecpf_esw_manager(dev) ?
|
||||
MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
|
||||
}
|
||||
|
||||
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev)
|
||||
{
|
||||
/* Ideally device should have the functions changed supported
|
||||
|
@ -541,25 +547,48 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
|
|||
#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs) \
|
||||
for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
|
||||
|
||||
/* Includes host PF (vport 0) if it's not esw manager. */
|
||||
#define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs) \
|
||||
for ((i) = (esw)->first_host_vport; \
|
||||
(rep) = &(esw)->offloads.vport_reps[i], \
|
||||
(i) <= (nvfs); (i)++)
|
||||
|
||||
#define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs) \
|
||||
for ((i) = (nvfs); \
|
||||
(rep) = &(esw)->offloads.vport_reps[i], \
|
||||
(i) >= (esw)->first_host_vport; (i)--)
|
||||
|
||||
#define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs) \
|
||||
for ((vport) = (esw)->first_host_vport; \
|
||||
(vport) <= (nvfs); (vport)++)
|
||||
|
||||
#define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs) \
|
||||
for ((vport) = (nvfs); \
|
||||
(vport) >= (esw)->first_host_vport; (vport)--)
|
||||
|
||||
struct mlx5_vport *__must_check
|
||||
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
|
||||
|
||||
bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
|
||||
|
||||
void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
|
||||
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
|
||||
|
||||
#else /* CONFIG_MLX5_ESWITCH */
|
||||
/* eswitch API stubs */
|
||||
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
|
||||
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
|
||||
static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; }
|
||||
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {}
|
||||
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { return 0; }
|
||||
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
|
||||
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
|
||||
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
|
||||
static inline int
|
||||
mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen)
|
||||
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
}
|
||||
|
||||
static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}
|
||||
|
||||
#define FDB_MAX_CHAIN 1
|
||||
#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
|
||||
#define FDB_MAX_PRIO 1
|
||||
|
|
|
@ -147,7 +147,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
|
|||
struct mlx5_flow_table *fdb;
|
||||
int j, i = 0;
|
||||
|
||||
if (esw->mode != SRIOV_OFFLOADS)
|
||||
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
|
||||
flow_act.action = attr->action;
|
||||
|
@ -357,11 +357,10 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
|
|||
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
|
||||
{
|
||||
struct mlx5_eswitch_rep *rep;
|
||||
int vf_vport, err = 0;
|
||||
int i, err = 0;
|
||||
|
||||
esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
|
||||
for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
|
||||
rep = &esw->offloads.vport_reps[vf_vport];
|
||||
mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
|
||||
if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
|
||||
continue;
|
||||
|
||||
|
@ -1370,21 +1369,22 @@ out:
|
|||
static int esw_offloads_start(struct mlx5_eswitch *esw,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
|
||||
int err, err1;
|
||||
|
||||
if (esw->mode != SRIOV_LEGACY &&
|
||||
if (esw->mode != MLX5_ESWITCH_LEGACY &&
|
||||
!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
|
||||
NL_SET_ERR_MSG_MOD(extack,
|
||||
"Can't set offloads mode, SRIOV legacy not enabled");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mlx5_eswitch_disable_sriov(esw);
|
||||
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
|
||||
mlx5_eswitch_disable(esw);
|
||||
mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
|
||||
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
|
||||
if (err) {
|
||||
NL_SET_ERR_MSG_MOD(extack,
|
||||
"Failed setting eswitch to offloads");
|
||||
err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
|
||||
err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
|
||||
if (err1) {
|
||||
NL_SET_ERR_MSG_MOD(extack,
|
||||
"Failed setting eswitch back to legacy");
|
||||
|
@ -1392,7 +1392,6 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
|
|||
}
|
||||
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
|
||||
if (mlx5_eswitch_inline_mode_get(esw,
|
||||
num_vfs,
|
||||
&esw->offloads.inline_mode)) {
|
||||
esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
|
||||
NL_SET_ERR_MSG_MOD(extack,
|
||||
|
@ -1409,11 +1408,11 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
|
|||
|
||||
int esw_offloads_init_reps(struct mlx5_eswitch *esw)
|
||||
{
|
||||
int total_vports = MLX5_TOTAL_VPORTS(esw->dev);
|
||||
int total_vports = esw->total_vports;
|
||||
struct mlx5_core_dev *dev = esw->dev;
|
||||
struct mlx5_eswitch_rep *rep;
|
||||
u8 hw_id[ETH_ALEN], rep_type;
|
||||
int vport;
|
||||
int vport_index;
|
||||
|
||||
esw->offloads.vport_reps = kcalloc(total_vports,
|
||||
sizeof(struct mlx5_eswitch_rep),
|
||||
|
@ -1421,10 +1420,11 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
|
|||
if (!esw->offloads.vport_reps)
|
||||
return -ENOMEM;
|
||||
|
||||
mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
|
||||
mlx5_query_mac_address(dev, hw_id);
|
||||
|
||||
mlx5_esw_for_all_reps(esw, vport, rep) {
|
||||
rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport);
|
||||
mlx5_esw_for_all_reps(esw, vport_index, rep) {
|
||||
rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
|
||||
rep->vport_index = vport_index;
|
||||
ether_addr_copy(rep->hw_id, hw_id);
|
||||
|
||||
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
|
||||
|
@ -1479,21 +1479,20 @@ static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
|
|||
__unload_reps_vf_vport(esw, nvports, rep_type);
|
||||
}
|
||||
|
||||
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
|
||||
u8 rep_type)
|
||||
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
|
||||
{
|
||||
__unload_reps_vf_vport(esw, nvports, rep_type);
|
||||
__unload_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
|
||||
|
||||
/* Special vports must be the last to unload. */
|
||||
__unload_reps_special_vport(esw, rep_type);
|
||||
}
|
||||
|
||||
static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw, int nvports)
|
||||
static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw)
|
||||
{
|
||||
u8 rep_type = NUM_REP_TYPES;
|
||||
|
||||
while (rep_type-- > 0)
|
||||
__unload_reps_all_vport(esw, nvports, rep_type);
|
||||
__unload_reps_all_vport(esw, rep_type);
|
||||
}
|
||||
|
||||
static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
|
||||
|
@ -1569,6 +1568,26 @@ err_vf:
|
|||
return err;
|
||||
}
|
||||
|
||||
static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
|
||||
{
|
||||
int err;
|
||||
|
||||
/* Special vports must be loaded first, uplink rep creates mdev resource. */
|
||||
err = __load_reps_special_vport(esw, rep_type);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
|
||||
if (err)
|
||||
goto err_vfs;
|
||||
|
||||
return 0;
|
||||
|
||||
err_vfs:
|
||||
__unload_reps_special_vport(esw, rep_type);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
|
||||
{
|
||||
u8 rep_type = 0;
|
||||
|
@ -1588,13 +1607,13 @@ err_reps:
|
|||
return err;
|
||||
}
|
||||
|
||||
static int esw_offloads_load_special_vport(struct mlx5_eswitch *esw)
|
||||
static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
|
||||
{
|
||||
u8 rep_type = 0;
|
||||
int err;
|
||||
|
||||
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
|
||||
err = __load_reps_special_vport(esw, rep_type);
|
||||
err = __load_reps_all_vport(esw, rep_type);
|
||||
if (err)
|
||||
goto err_reps;
|
||||
}
|
||||
|
@ -1603,7 +1622,7 @@ static int esw_offloads_load_special_vport(struct mlx5_eswitch *esw)
|
|||
|
||||
err_reps:
|
||||
while (rep_type-- > 0)
|
||||
__unload_reps_special_vport(esw, rep_type);
|
||||
__unload_reps_all_vport(esw, rep_type);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -1989,11 +2008,17 @@ static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
|
|||
esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
|
||||
}
|
||||
|
||||
static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int vf_nvports,
|
||||
int nvports)
|
||||
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
|
||||
{
|
||||
int num_vfs = esw->esw_funcs.num_vfs;
|
||||
int total_vports;
|
||||
int err;
|
||||
|
||||
if (mlx5_core_is_ecpf_esw_manager(esw->dev))
|
||||
total_vports = esw->total_vports;
|
||||
else
|
||||
total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
|
||||
|
||||
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
|
||||
mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
|
||||
|
||||
|
@ -2001,15 +2026,15 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int vf_nvports,
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
err = esw_create_offloads_fdb_tables(esw, nvports);
|
||||
err = esw_create_offloads_fdb_tables(esw, total_vports);
|
||||
if (err)
|
||||
goto create_fdb_err;
|
||||
|
||||
err = esw_create_offloads_table(esw, nvports);
|
||||
err = esw_create_offloads_table(esw, total_vports);
|
||||
if (err)
|
||||
goto create_ft_err;
|
||||
|
||||
err = esw_create_vport_rx_group(esw, nvports);
|
||||
err = esw_create_vport_rx_group(esw, total_vports);
|
||||
if (err)
|
||||
goto create_fg_err;
|
||||
|
||||
|
@ -2035,56 +2060,53 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
|
|||
esw_destroy_offloads_acl_tables(esw);
|
||||
}
|
||||
|
||||
static void esw_functions_changed_event_handler(struct work_struct *work)
|
||||
static void
|
||||
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
|
||||
{
|
||||
u32 out[MLX5_ST_SZ_DW(query_esw_functions_out)] = {};
|
||||
struct mlx5_host_work *host_work;
|
||||
struct mlx5_eswitch *esw;
|
||||
u16 num_vfs = 0;
|
||||
int err;
|
||||
bool host_pf_disabled;
|
||||
u16 new_num_vfs;
|
||||
|
||||
host_work = container_of(work, struct mlx5_host_work, work);
|
||||
esw = host_work->esw;
|
||||
new_num_vfs = MLX5_GET(query_esw_functions_out, out,
|
||||
host_params_context.host_num_of_vfs);
|
||||
host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
|
||||
host_params_context.host_pf_disabled);
|
||||
|
||||
err = mlx5_esw_query_functions(esw->dev, out, sizeof(out));
|
||||
num_vfs = MLX5_GET(query_esw_functions_out, out,
|
||||
host_params_context.host_num_of_vfs);
|
||||
if (err || num_vfs == esw->esw_funcs.num_vfs)
|
||||
goto out;
|
||||
if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
|
||||
return;
|
||||
|
||||
/* Number of VFs can only change from "0 to x" or "x to 0". */
|
||||
if (esw->esw_funcs.num_vfs > 0) {
|
||||
esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
|
||||
} else {
|
||||
err = esw_offloads_load_vf_reps(esw, num_vfs);
|
||||
int err;
|
||||
|
||||
err = esw_offloads_load_vf_reps(esw, new_num_vfs);
|
||||
if (err)
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
esw->esw_funcs.num_vfs = new_num_vfs;
|
||||
}
|
||||
|
||||
esw->esw_funcs.num_vfs = num_vfs;
|
||||
static void esw_functions_changed_event_handler(struct work_struct *work)
|
||||
{
|
||||
struct mlx5_host_work *host_work;
|
||||
struct mlx5_eswitch *esw;
|
||||
const u32 *out;
|
||||
|
||||
host_work = container_of(work, struct mlx5_host_work, work);
|
||||
esw = host_work->esw;
|
||||
|
||||
out = mlx5_esw_query_functions(esw->dev);
|
||||
if (IS_ERR(out))
|
||||
goto out;
|
||||
|
||||
esw_vfs_changed_event_handler(esw, out);
|
||||
kvfree(out);
|
||||
out:
|
||||
kfree(host_work);
|
||||
}
|
||||
|
||||
static void esw_emulate_event_handler(struct work_struct *work)
|
||||
{
|
||||
struct mlx5_host_work *host_work =
|
||||
container_of(work, struct mlx5_host_work, work);
|
||||
struct mlx5_eswitch *esw = host_work->esw;
|
||||
int err;
|
||||
|
||||
if (esw->esw_funcs.num_vfs) {
|
||||
err = esw_offloads_load_vf_reps(esw, esw->esw_funcs.num_vfs);
|
||||
if (err)
|
||||
esw_warn(esw->dev, "Load vf reps err=%d\n", err);
|
||||
}
|
||||
kfree(host_work);
|
||||
}
|
||||
|
||||
static int esw_functions_changed_event(struct notifier_block *nb,
|
||||
unsigned long type, void *data)
|
||||
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
|
||||
{
|
||||
struct mlx5_esw_functions *esw_funcs;
|
||||
struct mlx5_host_work *host_work;
|
||||
|
@ -2099,44 +2121,17 @@ static int esw_functions_changed_event(struct notifier_block *nb,
|
|||
|
||||
host_work->esw = esw;
|
||||
|
||||
if (mlx5_eswitch_is_funcs_handler(esw->dev))
|
||||
INIT_WORK(&host_work->work,
|
||||
esw_functions_changed_event_handler);
|
||||
else
|
||||
INIT_WORK(&host_work->work, esw_emulate_event_handler);
|
||||
INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
|
||||
queue_work(esw->work_queue, &host_work->work);
|
||||
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static void esw_functions_changed_event_init(struct mlx5_eswitch *esw,
|
||||
u16 vf_nvports)
|
||||
{
|
||||
if (mlx5_eswitch_is_funcs_handler(esw->dev)) {
esw->esw_funcs.num_vfs = 0;
MLX5_NB_INIT(&esw->esw_funcs.nb, esw_functions_changed_event,
ESW_FUNCTIONS_CHANGED);
mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
} else {
esw->esw_funcs.num_vfs = vf_nvports;
}
}

static void esw_functions_changed_event_cleanup(struct mlx5_eswitch *esw)
{
if (!mlx5_eswitch_is_funcs_handler(esw->dev))
return;

mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
flush_workqueue(esw->work_queue);
}

int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
int total_nvports)
int esw_offloads_init(struct mlx5_eswitch *esw)
{
int err;

err = esw_offloads_steering_init(esw, vf_nvports, total_nvports);
err = esw_offloads_steering_init(esw);
if (err)
return err;

@@ -2146,31 +2141,15 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
goto err_vport_metadata;
}

/* Only load special vports reps. VF reps will be loaded in
 * context of functions_changed event handler through real
 * or emulated event.
 */
err = esw_offloads_load_special_vport(esw);
err = esw_offloads_load_all_reps(esw);
if (err)
goto err_reps;

esw_offloads_devcom_init(esw);
mutex_init(&esw->offloads.termtbl_mutex);

esw_functions_changed_event_init(esw, vf_nvports);

mlx5_rdma_enable_roce(esw->dev);

/* Call esw_functions_changed event to load VF reps:
 * 1. HW does not support the event then emulate it
 * Or
 * 2. The event was already notified when num_vfs changed
 * and eswitch was in legacy mode
 */
esw_functions_changed_event(&esw->esw_funcs.nb.nb,
MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED,
NULL);

return 0;

err_reps:

@@ -2184,13 +2163,13 @@ err_vport_metadata:
static int esw_offloads_stop(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
int err, err1;

mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
mlx5_eswitch_disable(esw);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
if (err1) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch back to offloads");

@@ -2202,10 +2181,9 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,

void esw_offloads_cleanup(struct mlx5_eswitch *esw)
{
esw_functions_changed_event_cleanup(esw);
mlx5_rdma_disable_roce(esw->dev);
esw_offloads_devcom_cleanup(esw);
esw_offloads_unload_all_reps(esw, esw->esw_funcs.num_vfs);
esw_offloads_unload_all_reps(esw);
if (mlx5_eswitch_vport_match_metadata_enabled(esw))
mlx5_eswitch_disable_passing_vport_metadata(esw);
esw_offloads_steering_cleanup(esw);

@@ -2215,10 +2193,10 @@ static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
switch (mode) {
case DEVLINK_ESWITCH_MODE_LEGACY:
*mlx5_mode = SRIOV_LEGACY;
*mlx5_mode = MLX5_ESWITCH_LEGACY;
break;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
*mlx5_mode = SRIOV_OFFLOADS;
*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
break;
default:
return -EINVAL;

@@ -2230,10 +2208,10 @@ static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
switch (mlx5_mode) {
case SRIOV_LEGACY:
case MLX5_ESWITCH_LEGACY:
*mode = DEVLINK_ESWITCH_MODE_LEGACY;
break;
case SRIOV_OFFLOADS:
case MLX5_ESWITCH_OFFLOADS:
*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
break;
default:

@@ -2297,7 +2275,7 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
if(!MLX5_ESWITCH_MANAGER(dev))
return -EPERM;

if (dev->priv.eswitch->mode == SRIOV_NONE &&
if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
!mlx5_core_is_ecpf_esw_manager(dev))
return -EOPNOTSUPP;

@@ -2348,7 +2326,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch;
int err, vport;
int err, vport, num_vport;
u8 mlx5_mode;

err = mlx5_devlink_eswitch_check(devlink);

@@ -2377,7 +2355,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
if (err)
goto out;

for (vport = 1; vport < esw->enabled_vports; vport++) {
mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
if (err) {
NL_SET_ERR_MSG_MOD(extack,

@@ -2390,7 +2368,8 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
return 0;

revert_inline_mode:
while (--vport > 0)
num_vport = --vport;
mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
mlx5_modify_nic_vport_min_inline(dev,
vport,
esw->offloads.inline_mode);

@@ -2411,7 +2390,7 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
struct mlx5_core_dev *dev = esw->dev;

@@ -2420,7 +2399,7 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
if (!MLX5_CAP_GEN(dev, vport_group_manager))
return -EOPNOTSUPP;

if (esw->mode == SRIOV_NONE)
if (esw->mode == MLX5_ESWITCH_NONE)
return -EOPNOTSUPP;

switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {

@@ -2435,9 +2414,10 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
}

query_vports:
for (vport = 1; vport <= nvfs; vport++) {
mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
if (vport > 1 && prev_mlx5_mode != mlx5_mode)
if (prev_mlx5_mode != mlx5_mode)
return -EINVAL;
prev_mlx5_mode = mlx5_mode;
}

@@ -2467,7 +2447,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
return -EOPNOTSUPP;

if (esw->mode == SRIOV_LEGACY) {
if (esw->mode == MLX5_ESWITCH_LEGACY) {
esw->offloads.encap = encap;
return 0;
}

@@ -2530,12 +2510,11 @@ EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
u16 max_vf = mlx5_core_max_vfs(esw->dev);
struct mlx5_eswitch_rep *rep;
int i;

if (esw->mode == SRIOV_OFFLOADS)
__unload_reps_all_vport(esw, max_vf, rep_type);
if (esw->mode == MLX5_ESWITCH_OFFLOADS)
__unload_reps_all_vport(esw, rep_type);

mlx5_esw_for_all_reps(esw, i, rep)
atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);

@@ -414,7 +414,8 @@ static void mlx5_fpga_conn_cq_tasklet(unsigned long data)
mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
}

static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq)
static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq,
struct mlx5_eqe *eqe)
{
struct mlx5_fpga_conn *conn;

@@ -429,6 +430,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
struct mlx5_fpga_device *fdev = conn->fdev;
struct mlx5_core_dev *mdev = fdev->mdev;
u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0};
u32 out[MLX5_ST_SZ_DW(create_cq_out)];
struct mlx5_wq_param wqp;
struct mlx5_cqe64 *cqe;
int inlen, err, eqn;

@@ -476,7 +478,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.buf, pas);

err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen);
err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen, out, sizeof(out));
kvfree(in);

if (err)

@@ -867,7 +869,7 @@ struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
conn->cb_arg = attr->cb_arg;

remote_mac = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_mac_47_32);
err = mlx5_query_nic_vport_mac_address(fdev->mdev, 0, remote_mac);
err = mlx5_query_mac_address(fdev->mdev, remote_mac);
if (err) {
mlx5_fpga_err(fdev, "Failed to query local MAC: %d\n", err);
ret = ERR_PTR(err);

@@ -2092,7 +2092,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_d
{
struct mlx5_flow_steering *steering = dev->priv.steering;

if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
if (!steering || vport >= mlx5_eswitch_get_total_vports(dev))
return NULL;

switch (type) {

@@ -2423,7 +2423,7 @@ static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
if (!steering->esw_egress_root_ns)
return;

for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
cleanup_root_ns(steering->esw_egress_root_ns[i]);

kfree(steering->esw_egress_root_ns);

@@ -2438,7 +2438,7 @@ static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
if (!steering->esw_ingress_root_ns)
return;

for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
cleanup_root_ns(steering->esw_ingress_root_ns[i]);

kfree(steering->esw_ingress_root_ns);

@@ -2606,16 +2606,18 @@ static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vpo
static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
int total_vports = mlx5_eswitch_get_total_vports(dev);
int err;
int i;

steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
sizeof(*steering->esw_egress_root_ns),
GFP_KERNEL);
steering->esw_egress_root_ns =
kcalloc(total_vports,
sizeof(*steering->esw_egress_root_ns),
GFP_KERNEL);
if (!steering->esw_egress_root_ns)
return -ENOMEM;

for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
for (i = 0; i < total_vports; i++) {
err = init_egress_acl_root_ns(steering, i);
if (err)
goto cleanup_root_ns;

@@ -2634,16 +2636,18 @@ cleanup_root_ns:
static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
int total_vports = mlx5_eswitch_get_total_vports(dev);
int err;
int i;

steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
sizeof(*steering->esw_ingress_root_ns),
GFP_KERNEL);
steering->esw_ingress_root_ns =
kcalloc(total_vports,
sizeof(*steering->esw_ingress_root_ns),
GFP_KERNEL);
if (!steering->esw_ingress_root_ns)
return -ENOMEM;

for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
for (i = 0; i < total_vports; i++) {
err = init_ingress_acl_root_ns(steering, i);
if (err)
goto cleanup_root_ns;

@@ -202,6 +202,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}

if (MLX5_CAP_GEN(dev, event_cap)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_EVENT);
if (err)
return err;
}

return 0;
}

@@ -305,8 +305,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
!mlx5_sriov_is_enabled(dev1);

#ifdef CONFIG_MLX5_ESWITCH
roce_lag &= dev0->priv.eswitch->mode == SRIOV_NONE &&
dev1->priv.eswitch->mode == SRIOV_NONE;
roce_lag &= dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE;
#endif

if (roce_lag)

@@ -75,7 +75,7 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev);
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev);

int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);
void mlx5_cq_tasklet_cb(unsigned long data);

@@ -97,7 +97,4 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
#endif

int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);

#endif

@@ -734,8 +734,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
struct mlx5_priv *priv = &dev->priv;
int err = 0;

priv->pci_dev_data = id->driver_data;

mutex_init(&dev->pci_status_mutex);
pci_set_drvdata(dev->pdev, dev);

dev->bar_addr = pci_resource_start(pdev, 0);

@@ -1255,7 +1254,6 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)

INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
mutex_init(&dev->pci_status_mutex);
mutex_init(&dev->intf_state_mutex);

mutex_init(&priv->bfregs.reg_head.lock);

@@ -1317,6 +1315,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
dev->device = &pdev->dev;
dev->pdev = pdev;

dev->coredev_type = id->driver_data & MLX5_PCI_DEV_IS_VF ?
MLX5_COREDEV_VF : MLX5_COREDEV_PF;

err = mlx5_mdev_init(dev, prof_sel);
if (err)
goto mdev_init_err;

@@ -126,7 +126,7 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *
{
u8 hw_id[ETH_ALEN];

mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
mlx5_query_mac_address(dev, hw_id);
gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
addrconf_addr_eui48(&gid->raw[8], hw_id);
}

@@ -74,17 +74,11 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
int err;
int vf;

if (sriov->enabled_vfs) {
mlx5_core_warn(dev,
"failed to enable SRIOV on device, already enabled with %d vfs\n",
sriov->enabled_vfs);
return -EBUSY;
}

if (!MLX5_ESWITCH_MANAGER(dev))
goto enable_vfs_hca;

err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
mlx5_eswitch_update_num_of_vfs(dev->priv.eswitch, num_vfs);
err = mlx5_eswitch_enable(dev->priv.eswitch, MLX5_ESWITCH_LEGACY);
if (err) {
mlx5_core_warn(dev,
"failed to enable eswitch SRIOV (%d)\n", err);

@@ -99,7 +93,6 @@ enable_vfs_hca:
continue;
}
sriov->vfs_ctx[vf].enabled = 1;
sriov->enabled_vfs++;
if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) {
err = sriov_restore_guids(dev, vf);
if (err) {

@@ -118,13 +111,11 @@ enable_vfs_hca:
static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
int num_vfs = pci_num_vf(dev->pdev);
int err;
int vf;

if (!sriov->enabled_vfs)
goto out;

for (vf = 0; vf < sriov->num_vfs; vf++) {
for (vf = num_vfs - 1; vf >= 0; vf--) {
if (!sriov->vfs_ctx[vf].enabled)
continue;
err = mlx5_core_disable_hca(dev, vf + 1);

@@ -133,12 +124,10 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
continue;
}
sriov->vfs_ctx[vf].enabled = 0;
sriov->enabled_vfs--;
}

out:
if (MLX5_ESWITCH_MANAGER(dev))
mlx5_eswitch_disable_sriov(dev->priv.eswitch);
mlx5_eswitch_disable(dev->priv.eswitch);

if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");

@@ -191,13 +180,11 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)

int mlx5_sriov_attach(struct mlx5_core_dev *dev)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;

if (!mlx5_core_is_pf(dev) || !sriov->num_vfs)
if (!mlx5_core_is_pf(dev) || !pci_num_vf(dev->pdev))
return 0;

/* If sriov VFs exist in PCI level, enable them in device level */
return mlx5_device_enable_sriov(dev, sriov->num_vfs);
return mlx5_device_enable_sriov(dev, pci_num_vf(dev->pdev));
}

void mlx5_sriov_detach(struct mlx5_core_dev *dev)

@@ -210,22 +197,25 @@ void mlx5_sriov_detach(struct mlx5_core_dev *dev)

static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
{
u32 out[MLX5_ST_SZ_DW(query_esw_functions_out)] = {};
u16 host_total_vfs;
int err;
const u32 *out;

if (mlx5_core_is_ecpf_esw_manager(dev)) {
err = mlx5_esw_query_functions(dev, out, sizeof(out));
host_total_vfs = MLX5_GET(query_esw_functions_out, out,
host_params_context.host_total_vfs);
out = mlx5_esw_query_functions(dev);

/* Old FW doesn't support getting total_vfs from esw func
 * but supports getting it from pci_sriov.
 */
if (!err && host_total_vfs)
if (IS_ERR(out))
goto done;
host_total_vfs = MLX5_GET(query_esw_functions_out, out,
host_params_context.host_total_vfs);
kvfree(out);
if (host_total_vfs)
return host_total_vfs;
}

done:
return pci_sriov_get_totalvfs(dev->pdev);
}

@@ -34,6 +34,7 @@
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"

/* Mutex to hold while enabling or disabling RoCE */

@@ -155,11 +156,12 @@ int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
}

int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr)
u16 vport, bool other, u8 *addr)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
u8 *out_addr;
u32 *out;
int err;

out = kvzalloc(outlen, GFP_KERNEL);

@@ -169,7 +171,12 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
nic_vport_context.permanent_address);

err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
MLX5_SET(query_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
MLX5_SET(query_nic_vport_context_in, in, other_vport, other);

err = mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
if (!err)
ether_addr_copy(addr, &out_addr[2]);

@@ -178,6 +185,12 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);

int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
{
return mlx5_query_nic_vport_mac_address(mdev, 0, false, addr);
}
EXPORT_SYMBOL_GPL(mlx5_query_mac_address);

int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr)
{

@@ -194,9 +207,7 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
MLX5_SET(modify_nic_vport_context_in, in,
field_select.permanent_address, 1);
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

if (vport)
MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);

@@ -291,9 +302,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

if (vport)
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)

@@ -483,7 +492,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
MLX5_SET(modify_nic_vport_context_in, in,
field_select.node_guid, 1);
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);

@@ -1157,3 +1166,17 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
return tmp;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);

/**
 * mlx5_eswitch_get_total_vports - Get total vports of the eswitch
 *
 * @dev: Pointer to core device
 *
 * mlx5_eswitch_get_total_vports returns total number of vports for
 * the eswitch.
 */
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev);
}
EXPORT_SYMBOL(mlx5_eswitch_get_total_vports);

@@ -47,7 +47,7 @@ struct mlx5_core_cq {
struct completion free;
unsigned vector;
unsigned int irqn;
void (*comp) (struct mlx5_core_cq *);
void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
void (*event) (struct mlx5_core_cq *, enum mlx5_event);
u32 cons_index;
unsigned arm_sn;

@@ -55,7 +55,7 @@ struct mlx5_core_cq {
int pid;
struct {
struct list_head list;
void (*comp)(struct mlx5_core_cq *);
void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
void *priv;
} tasklet_ctx;
int reset_notify_added;

@@ -185,7 +185,7 @@ static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
}

int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen);
u32 *in, int inlen, u32 *out, int outlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *out, int outlen);

@@ -351,7 +351,7 @@ enum mlx5_event {

MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26,

MLX5_EVENT_TYPE_MAX = MLX5_EVENT_TYPE_DEVICE_TRACER + 1,
MLX5_EVENT_TYPE_MAX = 0x100,
};

enum {

@@ -437,6 +437,7 @@ enum {
MLX5_OPCODE_SET_PSV = 0x20,
MLX5_OPCODE_GET_PSV = 0x21,
MLX5_OPCODE_CHECK_PSV = 0x22,
MLX5_OPCODE_DUMP = 0x23,
MLX5_OPCODE_RGET_PSV = 0x26,
MLX5_OPCODE_RCHECK_PSV = 0x27,

@@ -444,6 +445,14 @@ enum {

};

enum {
MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x20,
};

enum {
MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x20,
};

enum {
MLX5_SET_PORT_RESET_QKEY = 0,
MLX5_SET_PORT_GUID0 = 16,

@@ -1085,6 +1094,9 @@ enum mlx5_cap_type {
MLX5_CAP_DEBUG,
MLX5_CAP_RESERVED_14,
MLX5_CAP_DEV_MEM,
MLX5_CAP_RESERVED_16,
MLX5_CAP_TLS,
MLX5_CAP_DEV_EVENT = 0x14,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};

@@ -1263,6 +1275,12 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP64_DEV_MEM(mdev, cap)\
MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)

#define MLX5_CAP_TLS(mdev, cap) \
MLX5_GET(tls_cap, (mdev)->caps.hca_cur[MLX5_CAP_TLS], cap)

#define MLX5_CAP_DEV_EVENT(mdev, cap)\
MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)

enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,

@@ -139,6 +139,7 @@ enum {
MLX5_REG_MTPPS = 0x9053,
MLX5_REG_MTPPSE = 0x9054,
MLX5_REG_MPEGC = 0x9056,
MLX5_REG_MCQS = 0x9060,
MLX5_REG_MCQI = 0x9061,
MLX5_REG_MCC = 0x9062,
MLX5_REG_MCDA = 0x9063,

@@ -182,6 +183,11 @@ enum port_state_policy {
MLX5_POLICY_INVALID = 0xffffffff
};

enum mlx5_coredev_type {
MLX5_COREDEV_PF,
MLX5_COREDEV_VF
};

struct mlx5_field_desc {
struct dentry *dent;
int i;

@@ -468,7 +474,6 @@ struct mlx5_vf_context {
struct mlx5_core_sriov {
struct mlx5_vf_context *vfs_ctx;
int num_vfs;
int enabled_vfs;
u16 max_vfs;
};

@@ -572,7 +577,6 @@ struct mlx5_priv {
struct mlx5_core_sriov sriov;
struct mlx5_lag *lag;
struct mlx5_devcom *devcom;
unsigned long pci_dev_data;
struct mlx5_core_roce roce;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;

@@ -653,6 +657,7 @@ struct mlx5_geneve;

struct mlx5_core_dev {
struct device *device;
enum mlx5_coredev_type coredev_type;
struct pci_dev *pdev;
/* sync pci state */
struct mutex pci_status_mutex;

@@ -1047,6 +1052,8 @@ int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);

int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

@@ -1087,9 +1094,9 @@ enum {
MLX5_PCI_DEV_IS_VF = 1 << 0,
};

static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev)
{
return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
return dev->coredev_type == MLX5_COREDEV_PF;
}

static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)

@@ -1097,17 +1104,18 @@ static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
return dev->caps.embedded_cpu;
}

static inline bool mlx5_core_is_ecpf_esw_manager(struct mlx5_core_dev *dev)
static inline bool
mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev)
{
return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager);
}

static inline bool mlx5_ecpf_vport_exists(struct mlx5_core_dev *dev)
static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev)
{
return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists);
}

static inline u16 mlx5_core_max_vfs(struct mlx5_core_dev *dev)
static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev)
{
return dev->priv.sriov.max_vfs;
}

@@ -15,7 +15,7 @@ struct mlx5_core_dev;
struct mlx5_eq_param {
u8 irq_index;
int nent;
u64 mask;
u64 mask[4];
};

struct mlx5_eq *

@@ -12,9 +12,9 @@
#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)

enum {
SRIOV_NONE,
SRIOV_LEGACY,
SRIOV_OFFLOADS
MLX5_ESWITCH_NONE,
MLX5_ESWITCH_LEGACY,
MLX5_ESWITCH_OFFLOADS
};

enum {

@@ -46,6 +46,8 @@ struct mlx5_eswitch_rep {
u16 vport;
u8 hw_id[ETH_ALEN];
u16 vlan;
/* Only IB rep is using vport_index */
u16 vport_index;
u32 vlan_refcount;
};

@@ -64,6 +66,8 @@ struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw,
u16 vport_num, u32 sqn);

u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);

#ifdef CONFIG_MLX5_ESWITCH
enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev);

@@ -91,4 +95,5 @@ mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
return 0;
};
#endif /* CONFIG_MLX5_ESWITCH */

#endif

@@ -91,6 +91,20 @@ enum {

enum {
MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
MLX5_OBJ_TYPE_MKEY = 0xff01,
MLX5_OBJ_TYPE_QP = 0xff02,
MLX5_OBJ_TYPE_PSV = 0xff03,
MLX5_OBJ_TYPE_RMP = 0xff04,
MLX5_OBJ_TYPE_XRC_SRQ = 0xff05,
MLX5_OBJ_TYPE_RQ = 0xff06,
MLX5_OBJ_TYPE_SQ = 0xff07,
MLX5_OBJ_TYPE_TIR = 0xff08,
MLX5_OBJ_TYPE_TIS = 0xff09,
MLX5_OBJ_TYPE_DCT = 0xff0a,
MLX5_OBJ_TYPE_XRQ = 0xff0b,
MLX5_OBJ_TYPE_RQT = 0xff0e,
MLX5_OBJ_TYPE_FLOW_COUNTER = 0xff0f,
MLX5_OBJ_TYPE_CQ = 0xff10,
};

enum {

@@ -106,6 +120,9 @@ enum {
MLX5_CMD_OP_QUERY_ISSI = 0x10a,
MLX5_CMD_OP_SET_ISSI = 0x10b,
MLX5_CMD_OP_SET_DRIVER_VERSION = 0x10d,
MLX5_CMD_OP_QUERY_SF_PARTITION = 0x111,
MLX5_CMD_OP_ALLOC_SF = 0x113,
MLX5_CMD_OP_DEALLOC_SF = 0x114,
MLX5_CMD_OP_CREATE_MKEY = 0x200,
MLX5_CMD_OP_QUERY_MKEY = 0x201,
MLX5_CMD_OP_DESTROY_MKEY = 0x202,

@@ -713,7 +730,11 @@ struct mlx5_ifc_e_switch_cap_bits {
u8 reserved_2b[0x6];
u8 max_encap_header_size[0xa];

u8 reserved_40[0x7c0];
u8 reserved_at_40[0xb];
u8 log_max_esw_sf[0x5];
u8 esw_sf_base_id[0x10];

u8 reserved_at_60[0x7a0];

};

@@ -853,6 +874,12 @@ struct mlx5_ifc_device_mem_cap_bits {
u8 reserved_at_180[0x680];
};

struct mlx5_ifc_device_event_cap_bits {
u8 user_affiliated_events[4][0x40];

u8 user_unaffiliated_events[4][0x40];
};

enum {
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0,
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2,

@@ -946,6 +973,16 @@ struct mlx5_ifc_vector_calc_cap_bits {
u8 reserved_at_c0[0x720];
};

struct mlx5_ifc_tls_cap_bits {
u8 tls_1_2_aes_gcm_128[0x1];
u8 tls_1_3_aes_gcm_128[0x1];
u8 tls_1_2_aes_gcm_256[0x1];
u8 tls_1_3_aes_gcm_256[0x1];
u8 reserved_at_4[0x1c];

u8 reserved_at_20[0x7e0];
};

enum {
MLX5_WQ_TYPE_LINKED_LIST = 0x0,
MLX5_WQ_TYPE_CYCLIC = 0x1,

@@ -1010,7 +1047,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {

u8 log_max_srq_sz[0x8];
u8 log_max_qp_sz[0x8];
u8 reserved_at_90[0x8];
u8 event_cap[0x1];
u8 reserved_at_91[0x7];
u8 prio_tag_required[0x1];
u8 reserved_at_99[0x2];
u8 log_max_qp[0x5];

@@ -1058,7 +1096,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 cc_modify_allowed[0x1];
u8 start_pad[0x1];
u8 cache_line_128byte[0x1];
u8 reserved_at_165[0xa];
u8 reserved_at_165[0x4];
u8 rts2rts_qp_counters_set_id[0x1];
u8 reserved_at_16a[0x5];
u8 qcam_reg[0x1];
u8 gid_table_size[0x10];

@@ -1275,7 +1315,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {

u8 reserved_at_440[0x20];

u8 reserved_at_460[0x3];
u8 tls[0x1];
u8 reserved_at_461[0x2];
u8 log_max_uctx[0x5];
u8 reserved_at_468[0x3];
u8 log_max_umem[0x5];

@@ -1300,7 +1341,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 max_geneve_tlv_option_data_len[0x5];
u8 reserved_at_570[0x10];

u8 reserved_at_580[0x3c];
u8 reserved_at_580[0x33];
u8 log_max_dek[0x5];
u8 reserved_at_5b8[0x4];
u8 mini_cqe_resp_stride_index[0x1];
u8 cqe_128_always[0x1];
u8 cqe_compression_128[0x1];

@@ -1330,13 +1373,24 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_640[0x10];
u8 num_q_monitor_counters[0x10];

u8 reserved_at_660[0x40];
u8 reserved_at_660[0x20];

u8 sf[0x1];
u8 sf_set_partition[0x1];
u8 reserved_at_682[0x1];
u8 log_max_sf[0x5];
u8 reserved_at_688[0x8];
u8 log_min_sf_size[0x8];
u8 max_num_sf_partitions[0x8];

u8 uctx_cap[0x20];

u8 reserved_at_6c0[0x4];
u8 flex_parser_id_geneve_tlv_option_0[0x4];
u8 reserved_at_6c8[0x138];
u8 reserved_at_6c8[0x28];
u8 sf_base_id[0x10];

u8 reserved_at_700[0x100];
};

enum mlx5_flow_destination_type {

@@ -2568,6 +2622,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_qos_cap_bits qos_cap;
struct mlx5_ifc_debug_cap_bits debug_cap;
struct mlx5_ifc_fpga_cap_bits fpga_cap;
struct mlx5_ifc_tls_cap_bits tls_cap;
u8 reserved_at_0[0x8000];
};

@@ -2707,7 +2762,8 @@ struct mlx5_ifc_traffic_counter_bits {

struct mlx5_ifc_tisc_bits {
u8 strict_lag_tx_port_affinity[0x1];
u8 reserved_at_1[0x3];
u8 tls_en[0x1];
u8 reserved_at_1[0x2];
u8 lag_tx_port_affinity[0x04];

u8 reserved_at_8[0x4];

@@ -2721,7 +2777,11 @@ struct mlx5_ifc_tisc_bits {

u8 reserved_at_140[0x8];
u8 underlay_qpn[0x18];
u8 reserved_at_160[0x3a0];

u8 reserved_at_160[0x8];
u8 pd[0x18];

u8 reserved_at_180[0x380];
};

enum {

@@ -7404,9 +7464,9 @@ struct mlx5_ifc_create_eq_in_bits {

u8 reserved_at_280[0x40];

u8 event_bitmask[0x40];
u8 event_bitmask[4][0x40];

u8 reserved_at_300[0x580];
u8 reserved_at_3c0[0x4c0];

u8 pas[0][0x40];
};

@@ -8524,7 +8584,7 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 mcda[0x1];
u8 mcc[0x1];
u8 mcqi[0x1];
u8 reserved_at_1f[0x1];
u8 mcqs[0x1];

u8 regs_95_to_87[0x9];
u8 mpegc[0x1];

@@ -9016,6 +9076,24 @@ struct mlx5_ifc_mtppse_reg_bits {
u8 reserved_at_40[0x40];
};

struct mlx5_ifc_mcqs_reg_bits {
u8 last_index_flag[0x1];
u8 reserved_at_1[0x7];
u8 fw_device[0x8];
u8 component_index[0x10];

u8 reserved_at_20[0x10];
u8 identifier[0x10];

u8 reserved_at_40[0x17];
u8 component_status[0x5];
u8 component_update_state[0x4];

u8 last_update_state_changer_type[0x4];
u8 last_update_state_changer_host_id[0x4];
u8 reserved_at_68[0x18];
};

struct mlx5_ifc_mcqi_cap_bits {
u8 supported_info_bitmask[0x20];

@@ -9036,6 +9114,43 @@ struct mlx5_ifc_mcqi_cap_bits {
u8 reserved_at_86[0x1a];
};

struct mlx5_ifc_mcqi_version_bits {
u8 reserved_at_0[0x2];
u8 build_time_valid[0x1];
u8 user_defined_time_valid[0x1];
u8 reserved_at_4[0x14];
u8 version_string_length[0x8];

u8 version[0x20];

u8 build_time[0x40];

u8 user_defined_time[0x40];

u8 build_tool_version[0x20];

u8 reserved_at_e0[0x20];

u8 version_string[92][0x8];
};

struct mlx5_ifc_mcqi_activation_method_bits {
u8 pending_server_ac_power_cycle[0x1];
u8 pending_server_dc_power_cycle[0x1];
u8 pending_server_reboot[0x1];
u8 pending_fw_reset[0x1];
u8 auto_activate[0x1];
u8 all_hosts_sync[0x1];
u8 device_hw_reset[0x1];
u8 reserved_at_7[0x19];
};

union mlx5_ifc_mcqi_reg_data_bits {
struct mlx5_ifc_mcqi_cap_bits mcqi_caps;
struct mlx5_ifc_mcqi_version_bits mcqi_version;
struct mlx5_ifc_mcqi_activation_method_bits mcqi_activation_mathod;
};

struct mlx5_ifc_mcqi_reg_bits {
u8 read_pending_component[0x1];
u8 reserved_at_1[0xf];

@@ -9053,7 +9168,7 @@ struct mlx5_ifc_mcqi_reg_bits {
u8 reserved_at_a0[0x10];
u8 data_size[0x10];

u8 data[0][0x20];
union mlx5_ifc_mcqi_reg_data_bits data[0];
};

struct mlx5_ifc_mcc_reg_bits {

@@ -9750,7 +9865,8 @@ struct mlx5_ifc_mtrc_ctrl_bits {

struct mlx5_ifc_host_params_context_bits {
u8 host_number[0x8];
u8 reserved_at_8[0x8];
u8 reserved_at_8[0x7];
u8 host_pf_disabled[0x1];
u8 host_num_of_vfs[0x10];

u8 host_total_vfs[0x10];

@@ -9786,6 +9902,165 @@ struct mlx5_ifc_query_esw_functions_out_bits {
struct mlx5_ifc_host_params_context_bits host_params_context;

u8 reserved_at_280[0x180];
u8 host_sf_enable[0][0x40];
};

struct mlx5_ifc_sf_partition_bits {
u8 reserved_at_0[0x10];
u8 log_num_sf[0x8];
u8 log_sf_bar_size[0x8];
};

struct mlx5_ifc_query_sf_partitions_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];

u8 syndrome[0x20];

u8 reserved_at_40[0x18];
u8 num_sf_partitions[0x8];

u8 reserved_at_60[0x20];

struct mlx5_ifc_sf_partition_bits sf_partition[0];
};

struct mlx5_ifc_query_sf_partitions_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];

u8 reserved_at_20[0x10];
u8 op_mod[0x10];

u8 reserved_at_40[0x40];
};

struct mlx5_ifc_dealloc_sf_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];

u8 syndrome[0x20];

u8 reserved_at_40[0x40];
};

struct mlx5_ifc_dealloc_sf_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];

u8 reserved_at_20[0x10];
u8 op_mod[0x10];

u8 reserved_at_40[0x10];
u8 function_id[0x10];

u8 reserved_at_60[0x20];
};

struct mlx5_ifc_alloc_sf_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];

u8 syndrome[0x20];

u8 reserved_at_40[0x40];
};

struct mlx5_ifc_alloc_sf_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];

u8 reserved_at_20[0x10];
u8 op_mod[0x10];

u8 reserved_at_40[0x10];
u8 function_id[0x10];

u8 reserved_at_60[0x20];
};

struct mlx5_ifc_affiliated_event_header_bits {
u8 reserved_at_0[0x10];
u8 obj_type[0x10];

u8 obj_id[0x20];
};

enum {
MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT(0xc),
};

enum {
MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc,
};

struct mlx5_ifc_encryption_key_obj_bits {
u8 modify_field_select[0x40];

u8 reserved_at_40[0x14];
u8 key_size[0x4];
u8 reserved_at_58[0x4];
u8 key_type[0x4];

u8 reserved_at_60[0x8];
u8 pd[0x18];

u8 reserved_at_80[0x180];
u8 key[8][0x20];

u8 reserved_at_300[0x500];
};

struct mlx5_ifc_create_encryption_key_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
struct mlx5_ifc_encryption_key_obj_bits encryption_key_object;
};

enum {
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1,
};

enum {
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK = 0x1,
};

struct mlx5_ifc_tls_static_params_bits {
u8 const_2[0x2];
u8 tls_version[0x4];
u8 const_1[0x2];
u8 reserved_at_8[0x14];
u8 encryption_standard[0x4];

u8 reserved_at_20[0x20];

u8 initial_record_number[0x40];

u8 resync_tcp_sn[0x20];

u8 gcm_iv[0x20];

u8 implicit_iv[0x40];

u8 reserved_at_100[0x8];
u8 dek_index[0x18];

u8 reserved_at_120[0xe0];
};

struct mlx5_ifc_tls_progress_params_bits {
u8 valid[0x1];
u8 reserved_at_1[0x7];
u8 pd[0x18];

u8 next_record_tcp_sn[0x20];

u8 hw_resync_tcp_sn[0x20];

u8 record_tracker_state[0x2];
u8 auth_state[0x2];
u8 reserved_at_64[0x4];
u8 hw_offset_record_number[0x18];
};

#endif /* MLX5_IFC_H */

@@ -202,7 +202,12 @@ struct mlx5_wqe_ctrl_seg {
u8 signature;
u8 rsvd[2];
u8 fm_ce_se;
__be32 imm;
union {
__be32 general_id;
__be32 imm;
__be32 umr_mkey;
__be32 tisn;
};
};

#define MLX5_WQE_CTRL_DS_MASK 0x3f

@@ -44,9 +44,6 @@
MLX5_VPORT_UPLINK_PLACEHOLDER + \
MLX5_VPORT_ECPF_PLACEHOLDER(mdev))

#define MLX5_TOTAL_VPORTS(mdev) (MLX5_SPECIAL_VPORTS(mdev) + \
mlx5_core_max_vfs(mdev))

#define MLX5_VPORT_MANAGER(mdev) \
(MLX5_CAP_GEN(mdev, vport_group_manager) && \
(MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \

@@ -58,6 +55,7 @@ enum {
MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
};

/* Vport number for each function must keep unchanged */
enum {
MLX5_VPORT_PF = 0x0,
MLX5_VPORT_FIRST_VF = 0x1,

@@ -69,7 +67,8 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 other_vport, u8 state);
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr);
u16 vport, bool other, u8 *addr);
int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 *min_inline);
void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline);