net/mlx5: Use a single IRQ for all async EQs
Change the IRQ allocation so that all async EQs are assigned to the same IRQ, leaving more IRQs available for completion EQs.

The change builds on the IRQ-sharing and EQ-polling-budget support introduced in the previous patches: when the shared interrupt is triggered, the kernel serially calls the handler of each sharing EQ with a fixed budget of EQEs to poll, so that no single EQ can starve the others.

Signed-off-by: Ariel Levkovich <lariel@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
commit 81bfa20603
parent cf49f41d29
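To make the mechanism the message describes concrete, here is a minimal userspace sketch, not the kernel implementation: several event queues share one interrupt, and each firing serially polls every sharing EQ with a fixed EQE budget, so a busy EQ cannot starve the others. All names (demo_eq, demo_shared_irq) and the budget value are made up for illustration; the real dispatch runs through the notifier blocks (irq_nb / .nb) visible in the hunks below.

#include <stdio.h>

#define BUDGET  8	/* max EQEs polled per EQ per interrupt (assumed value) */
#define NUM_EQS 3

struct demo_eq {
	const char *name;
	int pending;	/* EQEs waiting on this EQ */
};

/* Poll one EQ, consuming at most @budget entries. */
static int demo_eq_poll(struct demo_eq *eq, int budget)
{
	int done = 0;

	while (eq->pending > 0 && done < budget) {
		eq->pending--;
		done++;
	}
	return done;
}

/* Shared interrupt handler: serially service every sharing EQ. */
static void demo_shared_irq(struct demo_eq *eqs, int n)
{
	for (int i = 0; i < n; i++) {
		int done = demo_eq_poll(&eqs[i], BUDGET);

		if (done)
			printf("%-6s polled %2d EQEs, %2d left\n",
			       eqs[i].name, done, eqs[i].pending);
	}
}

int main(void)
{
	struct demo_eq eqs[NUM_EQS] = {
		{ "cmd",   2  },
		{ "async", 40 },	/* busy EQ cannot monopolize the IRQ */
		{ "pages", 5  },
	};

	/* The loop stands in for the interrupt refiring while events remain. */
	while (eqs[0].pending || eqs[1].pending || eqs[2].pending) {
		puts("-- shared IRQ fires --");
		demo_shared_irq(eqs, NUM_EQS);
	}
	return 0;
}

In the run above, the async EQ holds 40 events but is capped at BUDGET per firing, so the cmd and pages EQs are still serviced on every interrupt.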
@@ -1557,7 +1557,7 @@ mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 	eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
 	param = (struct mlx5_eq_param) {
-		.index = MLX5_EQ_PFAULT_IDX,
+		.irq_index = 0,
 		.mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
 		.nent = MLX5_IB_NUM_PF_EQE,
 		.nb = &eq->irq_nb,
 	};
@@ -250,7 +250,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 	struct mlx5_cq_table *cq_table = &eq->cq_table;
 	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
 	struct mlx5_priv *priv = &dev->priv;
-	u8 vecidx = param->index;
+	u8 vecidx = param->irq_index;
 	__be64 *pas;
 	void *eqc;
 	int inlen;
@@ -435,8 +435,9 @@ static int create_async_eq(struct mlx5_core_dev *dev,
 	int err;

 	mutex_lock(&eq_table->lock);
-	if (param->index >= MLX5_EQ_MAX_ASYNC_EQS) {
-		err = -ENOSPC;
+	/* Async EQs must share irq index 0 */
+	if (param->irq_index != 0) {
+		err = -EINVAL;
 		goto unlock;
 	}

@@ -540,7 +541,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)

 	table->cmd_eq.irq_nb.notifier_call = mlx5_eq_async_int;
 	param = (struct mlx5_eq_param) {
-		.index = MLX5_EQ_CMD_IDX,
+		.irq_index = 0,
 		.mask = 1ull << MLX5_EVENT_TYPE_CMD,
 		.nent = MLX5_NUM_CMD_EQE,
 		.nb = &table->cmd_eq.irq_nb,
@@ -555,7 +556,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)

 	table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int;
 	param = (struct mlx5_eq_param) {
-		.index = MLX5_EQ_ASYNC_IDX,
+		.irq_index = 0,
 		.mask = gather_async_events_mask(dev),
 		.nent = MLX5_NUM_ASYNC_EQE,
 		.nb = &table->async_eq.irq_nb,
@@ -568,7 +569,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)

 	table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int;
 	param = (struct mlx5_eq_param) {
-		.index = MLX5_EQ_PAGEREQ_IDX,
+		.irq_index = 0,
 		.mask = 1 << MLX5_EVENT_TYPE_PAGE_REQUEST,
 		.nent = /* TODO: sriov max_vf + */ 1,
 		.nb = &table->pages_eq.irq_nb,
@@ -731,7 +732,7 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
 	ncomp_eqs = table->num_comp_eqs;
 	nent = MLX5_COMP_EQ_SIZE;
 	for (i = 0; i < ncomp_eqs; i++) {
-		int vecidx = i + MLX5_EQ_VEC_COMP_BASE;
+		int vecidx = i + MLX5_IRQ_VEC_COMP_BASE;
 		struct mlx5_eq_param param = {};

 		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
@@ -748,7 +749,7 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)

 		eq->irq_nb.notifier_call = mlx5_eq_comp_int;
 		param = (struct mlx5_eq_param) {
-			.index = vecidx,
+			.irq_index = vecidx,
 			.mask = 0,
 			.nent = nent,
 			.nb = &eq->irq_nb,
@@ -800,7 +801,7 @@ EXPORT_SYMBOL(mlx5_comp_vectors_count);
 struct cpumask *
 mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
 {
-	int vecidx = vector + MLX5_EQ_VEC_COMP_BASE;
+	int vecidx = vector + MLX5_IRQ_VEC_COMP_BASE;

 	return mlx5_irq_get_affinity_mask(dev->priv.eq_table->irq_table,
 					  vecidx);
@@ -45,7 +45,7 @@ void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)

 int mlx5_irq_get_num_comp(struct mlx5_irq_table *table)
 {
-	return table->nvec - MLX5_EQ_VEC_COMP_BASE;
+	return table->nvec - MLX5_IRQ_VEC_COMP_BASE;
 }

 static struct mlx5_irq *mlx5_irq_get(struct mlx5_core_dev *dev, int vecidx)
@@ -81,24 +81,14 @@ static irqreturn_t mlx5_irq_int_handler(int irq, void *nh)

 static void irq_set_name(char *name, int vecidx)
 {
-	switch (vecidx) {
-	case MLX5_EQ_CMD_IDX:
-		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_cmd_eq");
-		break;
-	case MLX5_EQ_ASYNC_IDX:
-		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async_eq");
-		break;
-	case MLX5_EQ_PAGEREQ_IDX:
-		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_pages_eq");
-		break;
-	case MLX5_EQ_PFAULT_IDX:
-		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_ib_page_fault_eq");
-		break;
-	default:
-		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
-			 vecidx - MLX5_EQ_VEC_COMP_BASE);
-		break;
+	if (vecidx == 0) {
+		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async");
+		return;
 	}
+
+	snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
+		 vecidx - MLX5_IRQ_VEC_COMP_BASE);
+	return;
 }

 static int request_irqs(struct mlx5_core_dev *dev, int nvec)
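For reference, the reworked naming scheme compiles and runs standalone. This userspace sketch copies the new irq_set_name() logic from the hunk above (the MLX5_MAX_IRQ_NAME value is assumed; the real constant lives elsewhere in the driver) and shows the names it produces: vector 0 is the shared async vector, everything else is a completion vector named by its zero-based index.

#include <stdio.h>

#define MLX5_MAX_IRQ_NAME      32	/* assumed value for this sketch */
#define MLX5_IRQ_VEC_COMP_BASE 1

static void irq_set_name(char *name, int vecidx)
{
	if (vecidx == 0) {
		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async");
		return;
	}

	snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
		 vecidx - MLX5_IRQ_VEC_COMP_BASE);
}

int main(void)
{
	char name[MLX5_MAX_IRQ_NAME];

	for (int vecidx = 0; vecidx < 4; vecidx++) {
		irq_set_name(name, vecidx);
		printf("vector %d -> %s\n", vecidx, name);
	}
	return 0;	/* prints mlx5_async, mlx5_comp0, mlx5_comp1, mlx5_comp2 */
}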
@@ -159,7 +149,7 @@ static int irq_set_rmap(struct mlx5_core_dev *mdev)
 		goto err_out;
 	}

-	vecidx = MLX5_EQ_VEC_COMP_BASE;
+	vecidx = MLX5_IRQ_VEC_COMP_BASE;
 	for (; vecidx < irq_table->nvec; vecidx++) {
 		err = irq_cpu_rmap_add(irq_table->rmap,
 				       pci_irq_vector(mdev->pdev, vecidx));
@@ -182,7 +172,7 @@ err_out:

 static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
 {
-	int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
+	int vecidx = MLX5_IRQ_VEC_COMP_BASE + i;
 	struct mlx5_irq *irq;
 	int irqn;

@@ -205,7 +195,7 @@ static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)

 static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
 {
-	int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
+	int vecidx = MLX5_IRQ_VEC_COMP_BASE + i;
 	struct mlx5_irq *irq;
 	int irqn;

@@ -279,16 +269,16 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
 	int err;

 	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
-	       MLX5_EQ_VEC_COMP_BASE;
+	       MLX5_IRQ_VEC_COMP_BASE;
 	nvec = min_t(int, nvec, num_eqs);
-	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
+	if (nvec <= MLX5_IRQ_VEC_COMP_BASE)
 		return -ENOMEM;

 	table->irq = kcalloc(nvec, sizeof(*table->irq), GFP_KERNEL);
 	if (!table->irq)
 		return -ENOMEM;

-	nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_EQ_VEC_COMP_BASE + 1,
+	nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
 				     nvec, PCI_IRQ_MSIX);
 	if (nvec < 0) {
 		err = nvec;
@@ -4,17 +4,7 @@
 #ifndef MLX5_CORE_EQ_H
 #define MLX5_CORE_EQ_H

-enum {
-	MLX5_EQ_PAGEREQ_IDX = 0,
-	MLX5_EQ_CMD_IDX = 1,
-	MLX5_EQ_ASYNC_IDX = 2,
-	/* reserved to be used by mlx5_core ulps (mlx5e/mlx5_ib) */
-	MLX5_EQ_PFAULT_IDX = 3,
-	MLX5_EQ_MAX_ASYNC_EQS,
-	/* completion eqs vector indices start here */
-	MLX5_EQ_VEC_COMP_BASE = MLX5_EQ_MAX_ASYNC_EQS,
-};
-
+#define MLX5_IRQ_VEC_COMP_BASE 1
 #define MLX5_NUM_CMD_EQE (32)
 #define MLX5_NUM_ASYNC_EQE (0x1000)
 #define MLX5_NUM_SPARE_EQE (0x80)
@@ -23,7 +13,7 @@ struct mlx5_eq;
 struct mlx5_core_dev;

 struct mlx5_eq_param {
-	u8 index;
+	u8 irq_index;
 	int nent;
 	u64 mask;
 	struct notifier_block *nb;
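As a back-of-the-envelope check on the "more IRQs for completion EQs" claim, this illustrative snippet (not part of the patch) contrasts the two vector layouts using only constants visible in the eq.h hunks above: the old reserved base was MLX5_EQ_MAX_ASYNC_EQS = 4 (async indices 0..3), the new one is MLX5_IRQ_VEC_COMP_BASE = 1. The nvec value is an arbitrary example.

#include <stdio.h>

/* Values taken from the eq.h hunks above. */
#define OLD_COMP_BASE 4	/* MLX5_EQ_MAX_ASYNC_EQS: pages, cmd, async, pfault */
#define NEW_COMP_BASE 1	/* MLX5_IRQ_VEC_COMP_BASE: one shared async vector */

int main(void)
{
	int nvec = 32;	/* example MSI-X vector count for a device */

	/* mlx5_irq_get_num_comp() computes nvec - base, so the change
	 * frees OLD_COMP_BASE - NEW_COMP_BASE = 3 vectors per device
	 * for completion EQs.
	 */
	printf("old layout: %d completion vectors\n", nvec - OLD_COMP_BASE);
	printf("new layout: %d completion vectors\n", nvec - NEW_COMP_BASE);
	return 0;
}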