net/mlx5: Move all IRQ logic to pci_irq.c
Finalize the IRQ separation and expose the IRQ interface.

Signed-off-by: Yuval Avnery <yuvalav@mellanox.com>
Reviewed-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
commit 256cf690af
parent bfb49549ea
drivers/net/ethernet/mellanox/mlx5/core/Makefile

@@ -13,7 +13,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
 #
 mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
                health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \
-               transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
+               transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
                fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
                lib/devcom.o diag/fs_tracepoint.o diag/fw_tracer.o
drivers/net/ethernet/mellanox/mlx5/core/eq.c

@@ -71,20 +71,6 @@ enum {
 
 static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);
 
-struct mlx5_irq_info {
-        struct atomic_notifier_head nh;
-        cpumask_var_t mask;
-        char name[MLX5_MAX_IRQ_NAME];
-};
-
-struct mlx5_irq_table {
-        struct mlx5_irq_info *irq_info;
-        int nvec;
-#ifdef CONFIG_RFS_ACCEL
-        struct cpu_rmap *rmap;
-#endif
-};
-
 struct mlx5_eq_table {
         struct list_head comp_eqs_list;
         struct mlx5_eq_async pages_eq;
@@ -114,157 +100,6 @@ struct mlx5_eq_table {
         (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \
         (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
 
-int mlx5_irq_table_init(struct mlx5_core_dev *dev)
-{
-        struct mlx5_irq_table *irq_table;
-
-        irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL);
-        if (!irq_table)
-                return -ENOMEM;
-
-        dev->priv.irq_table = irq_table;
-        return 0;
-}
-
-void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
-{
-        kvfree(dev->priv.irq_table);
-}
-
-static int mlx5_irq_get_num_comp(struct mlx5_irq_table *table)
-{
-        return table->nvec - MLX5_EQ_VEC_COMP_BASE;
-}
-
-static struct mlx5_irq_info *mlx5_irq_get(struct mlx5_core_dev *dev, int vecidx)
-{
-        struct mlx5_irq_table *irq_table = dev->priv.irq_table;
-
-        return &irq_table->irq_info[vecidx];
-}
-
-static int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
-                              struct notifier_block *nb)
-{
-        struct mlx5_irq_info *irq_info;
-
-        irq_info = &irq_table->irq_info[vecidx];
-        return atomic_notifier_chain_register(&irq_info->nh, nb);
-}
-
-static int mlx5_irq_detach_nb(struct mlx5_irq_table *irq_table, int vecidx,
-                              struct notifier_block *nb)
-{
-        struct mlx5_irq_info *irq_info;
-
-        irq_info = &irq_table->irq_info[vecidx];
-        return atomic_notifier_chain_unregister(&irq_info->nh, nb);
-}
-
-static irqreturn_t mlx5_irq_int_handler(int irq, void *nh)
-{
-        atomic_notifier_call_chain(nh, 0, NULL);
-        return IRQ_HANDLED;
-}
-
-static void irq_set_name(char *name, int vecidx)
-{
-        switch (vecidx) {
-        case MLX5_EQ_CMD_IDX:
-                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_cmd_eq");
-                break;
-        case MLX5_EQ_ASYNC_IDX:
-                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async_eq");
-                break;
-        case MLX5_EQ_PAGEREQ_IDX:
-                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_pages_eq");
-                break;
-        case MLX5_EQ_PFAULT_IDX:
-                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_ib_page_fault_eq");
-                break;
-        default:
-                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
-                         vecidx - MLX5_EQ_VEC_COMP_BASE);
-                break;
-        }
-}
-
-static int request_irqs(struct mlx5_core_dev *dev, int nvec)
-{
-        char name[MLX5_MAX_IRQ_NAME];
-        int err;
-        int i;
-
-        for (i = 0; i < nvec; i++) {
-                struct mlx5_irq_info *irq_info = mlx5_irq_get(dev, i);
-                int irqn = pci_irq_vector(dev->pdev, i);
-
-                irq_set_name(name, i);
-                ATOMIC_INIT_NOTIFIER_HEAD(&irq_info->nh);
-                snprintf(irq_info->name, MLX5_MAX_IRQ_NAME,
-                         "%s@pci:%s", name, pci_name(dev->pdev));
-                err = request_irq(irqn, mlx5_irq_int_handler, 0, irq_info->name,
-                                  &irq_info->nh);
-                if (err) {
-                        mlx5_core_err(dev, "Failed to request irq\n");
-                        goto err_request_irq;
-                }
-        }
-        return 0;
-
-err_request_irq:
-        for (; i >= 0; i--) {
-                struct mlx5_irq_info *irq_info = mlx5_irq_get(dev, i);
-                int irqn = pci_irq_vector(dev->pdev, i);
-
-                free_irq(irqn, &irq_info->nh);
-        }
-        return err;
-}
-
-static void irq_clear_rmap(struct mlx5_core_dev *dev)
-{
-#ifdef CONFIG_RFS_ACCEL
-        struct mlx5_irq_table *irq_table = dev->priv.irq_table;
-
-        free_irq_cpu_rmap(irq_table->rmap);
-#endif
-}
-
-static int irq_set_rmap(struct mlx5_core_dev *mdev)
-{
-        int err = 0;
-#ifdef CONFIG_RFS_ACCEL
-        struct mlx5_irq_table *irq_table = mdev->priv.irq_table;
-        int num_affinity_vec;
-        int vecidx;
-
-        num_affinity_vec = mlx5_irq_get_num_comp(irq_table);
-        irq_table->rmap = alloc_irq_cpu_rmap(num_affinity_vec);
-        if (!irq_table->rmap) {
-                err = -ENOMEM;
-                mlx5_core_err(mdev, "failed to allocate cpu_rmap. err %d", err);
-                goto err_out;
-        }
-
-        vecidx = MLX5_EQ_VEC_COMP_BASE;
-        for (; vecidx < irq_table->nvec; vecidx++) {
-                err = irq_cpu_rmap_add(irq_table->rmap,
-                                       pci_irq_vector(mdev->pdev, vecidx));
-                if (err) {
-                        mlx5_core_err(mdev, "irq_cpu_rmap_add failed. err %d", err);
-                        goto err_irq_cpu_rmap_add;
-                }
-        }
-        return 0;
-
-err_irq_cpu_rmap_add:
-        irq_clear_rmap(mdev);
-err_out:
-#endif
-        return err;
-}
-
 static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
 {
         u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
@@ -868,75 +703,6 @@ void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
 }
 EXPORT_SYMBOL(mlx5_eq_update_ci);
 
 /* Completion EQs */
 
-static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
-{
-        int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
-        struct mlx5_priv *priv = &mdev->priv;
-        struct mlx5_irq_info *irq_info;
-        int irq;
-
-        irq_info = mlx5_irq_get(mdev, vecidx);
-        irq = pci_irq_vector(mdev->pdev, vecidx);
-
-        if (!zalloc_cpumask_var(&irq_info->mask, GFP_KERNEL)) {
-                mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
-                return -ENOMEM;
-        }
-
-        cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
-                        irq_info->mask);
-
-        if (IS_ENABLED(CONFIG_SMP) &&
-            irq_set_affinity_hint(irq, irq_info->mask))
-                mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
-
-        return 0;
-}
-
-static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
-{
-        int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
-        struct mlx5_irq_info *irq_info;
-        int irq;
-
-        irq_info = mlx5_irq_get(mdev, vecidx);
-        irq = pci_irq_vector(mdev->pdev, vecidx);
-        irq_set_affinity_hint(irq, NULL);
-        free_cpumask_var(irq_info->mask);
-}
-
-static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev)
-{
-        int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
-        int err;
-        int i;
-
-        for (i = 0; i < nvec; i++) {
-                err = set_comp_irq_affinity_hint(mdev, i);
-                if (err)
-                        goto err_out;
-        }
-
-        return 0;
-
-err_out:
-        for (i--; i >= 0; i--)
-                clear_comp_irq_affinity_hint(mdev, i);
-
-        return err;
-}
-
-static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev)
-{
-        int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
-        int i;
-
-        for (i = 0; i < nvec; i++)
-                clear_comp_irq_affinity_hint(mdev, i);
-}
-
 static void destroy_comp_eqs(struct mlx5_core_dev *dev)
 {
         struct mlx5_eq_table *table = dev->priv.eq_table;
@@ -1031,12 +797,6 @@ unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
 }
 EXPORT_SYMBOL(mlx5_comp_vectors_count);
 
-static struct cpumask *
-mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx)
-{
-        return irq_table->irq_info[vecidx].mask;
-}
-
 struct cpumask *
 mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
 {
@@ -1048,11 +808,6 @@ mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
 EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
 
 #ifdef CONFIG_RFS_ACCEL
-static struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *irq_table)
-{
-        return irq_table->rmap;
-}
-
 struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
 {
         return mlx5_irq_get_rmap(dev->priv.eq_table->irq_table);
@@ -1082,88 +837,6 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
         mutex_unlock(&table->lock);
 }
 
-static void unrequest_irqs(struct mlx5_core_dev *dev)
-{
-        struct mlx5_irq_table *table = dev->priv.irq_table;
-        int i;
-
-        for (i = 0; i < table->nvec; i++)
-                free_irq(pci_irq_vector(dev->pdev, i),
-                         &mlx5_irq_get(dev, i)->nh);
-}
-
-int mlx5_irq_table_create(struct mlx5_core_dev *dev)
-{
-        struct mlx5_priv *priv = &dev->priv;
-        struct mlx5_irq_table *table = priv->irq_table;
-        int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
-                      MLX5_CAP_GEN(dev, max_num_eqs) :
-                      1 << MLX5_CAP_GEN(dev, log_max_eq);
-        int nvec;
-        int err;
-
-        nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
-               MLX5_EQ_VEC_COMP_BASE;
-        nvec = min_t(int, nvec, num_eqs);
-        if (nvec <= MLX5_EQ_VEC_COMP_BASE)
-                return -ENOMEM;
-
-        table->irq_info = kcalloc(nvec, sizeof(*table->irq_info), GFP_KERNEL);
-        if (!table->irq_info)
-                return -ENOMEM;
-
-        nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_EQ_VEC_COMP_BASE + 1,
-                                     nvec, PCI_IRQ_MSIX);
-        if (nvec < 0) {
-                err = nvec;
-                goto err_free_irq_info;
-        }
-
-        table->nvec = nvec;
-
-        err = irq_set_rmap(dev);
-        if (err)
-                goto err_set_rmap;
-
-        err = request_irqs(dev, nvec);
-        if (err)
-                goto err_request_irqs;
-
-        err = set_comp_irq_affinity_hints(dev);
-        if (err)
-                goto err_set_affinity;
-
-        return 0;
-
-err_set_affinity:
-        unrequest_irqs(dev);
-err_request_irqs:
-        irq_clear_rmap(dev);
-err_set_rmap:
-        pci_free_irq_vectors(dev->pdev);
-err_free_irq_info:
-        kfree(table->irq_info);
-        return err;
-}
-
-void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
-{
-        struct mlx5_irq_table *table = dev->priv.irq_table;
-        int i;
-
-        /* free_irq requires that affinity and rmap will be cleared
-         * before calling it. This is why there is asymmetry with set_rmap
-         * which should be called after alloc_irq but before request_irq.
-         */
-        irq_clear_rmap(dev);
-        clear_comp_irqs_affinity_hints(dev);
-        for (i = 0; i < table->nvec; i++)
-                free_irq(pci_irq_vector(dev->pdev, i),
-                         &mlx5_irq_get(dev, i)->nh);
-        pci_free_irq_vectors(dev->pdev);
-        kfree(table->irq_info);
-}
-
 int mlx5_eq_table_create(struct mlx5_core_dev *dev)
 {
         struct mlx5_eq_table *eq_table = dev->priv.eq_table;
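At the heart of the code being moved is a small dispatch pattern: each MSI-X vector's hard-IRQ handler does nothing but fire an atomic notifier chain, and consumers such as EQs subscribe with notifier blocks instead of owning the IRQ. A minimal, self-contained sketch of that pattern (all demo_* names are hypothetical and not part of this commit):

#include <linux/interrupt.h>
#include <linux/notifier.h>

static ATOMIC_NOTIFIER_HEAD(demo_nh);

/* Hard-IRQ context: fan the interrupt out to every registered consumer. */
static irqreturn_t demo_handler(int irq, void *dev_id)
{
        atomic_notifier_call_chain(&demo_nh, 0, NULL);
        return IRQ_HANDLED;
}

static int demo_consumer_cb(struct notifier_block *nb, unsigned long action,
                            void *data)
{
        /* e.g. poll a completion EQ, schedule NAPI, ... */
        return NOTIFY_OK;
}

static struct notifier_block demo_nb = { .notifier_call = demo_consumer_cb };

static int demo_setup(int irqn)
{
        int err;

        err = request_irq(irqn, demo_handler, 0, "demo", NULL);
        if (err)
                return err;
        /* Consumers come and go without touching the IRQ itself. */
        return atomic_notifier_chain_register(&demo_nh, &demo_nb);
}

Because registration and unregistration are the only per-consumer operations, EQ create/destroy no longer needs to request or free IRQs, which is what makes the separation below possible.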
drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h

@@ -7,7 +7,6 @@
 #include <linux/mlx5/eq.h>
 #include <linux/mlx5/cq.h>
 
-#define MLX5_MAX_IRQ_NAME (32)
 #define MLX5_EQE_SIZE (sizeof(struct mlx5_eqe))
 
 struct mlx5_eq_tasklet {
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h

@@ -157,6 +157,14 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev);
 void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
 int mlx5_irq_table_create(struct mlx5_core_dev *dev);
 void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
+int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
+                       struct notifier_block *nb);
+int mlx5_irq_detach_nb(struct mlx5_irq_table *irq_table, int vecidx,
+                       struct notifier_block *nb);
+struct cpumask *
+mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx);
+struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *table);
+int mlx5_irq_get_num_comp(struct mlx5_irq_table *table);
 
 int mlx5_events_init(struct mlx5_core_dev *dev);
 void mlx5_events_cleanup(struct mlx5_core_dev *dev);
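These declarations are the exposed IRQ interface the commit message refers to: EQ code subscribes to a vector's interrupt through the notifier API rather than requesting the IRQ itself. A hypothetical consumer sketch (the my_eq_* names are illustrative; the real attach points live in eq.c and are not shown in this diff):

static int my_eq_int(struct notifier_block *nb, unsigned long type, void *data)
{
        /* Process EQEs for the EQ that embeds this notifier_block. */
        return NOTIFY_OK;
}

static struct notifier_block my_eq_nb = { .notifier_call = my_eq_int };

static int my_eq_enable(struct mlx5_core_dev *dev, int vecidx)
{
        /* Subscribe to the vector's interrupt; the IRQ itself stays
         * owned by the irq_table in pci_irq.c.
         */
        return mlx5_irq_attach_nb(dev->priv.irq_table, vecidx, &my_eq_nb);
}

static void my_eq_disable(struct mlx5_core_dev *dev, int vecidx)
{
        mlx5_irq_detach_nb(dev->priv.irq_table, vecidx, &my_eq_nb);
}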
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c (new file)

@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/module.h>
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
+
+#define MLX5_MAX_IRQ_NAME (32)
+
+struct mlx5_irq_info {
+        struct atomic_notifier_head nh;
+        cpumask_var_t mask;
+        char name[MLX5_MAX_IRQ_NAME];
+};
+
+struct mlx5_irq_table {
+        struct mlx5_irq_info *irq_info;
+        int nvec;
+#ifdef CONFIG_RFS_ACCEL
+        struct cpu_rmap *rmap;
+#endif
+};
+
+int mlx5_irq_table_init(struct mlx5_core_dev *dev)
+{
+        struct mlx5_irq_table *irq_table;
+
+        irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL);
+        if (!irq_table)
+                return -ENOMEM;
+
+        dev->priv.irq_table = irq_table;
+        return 0;
+}
+
+void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
+{
+        kvfree(dev->priv.irq_table);
+}
+
+int mlx5_irq_get_num_comp(struct mlx5_irq_table *table)
+{
+        return table->nvec - MLX5_EQ_VEC_COMP_BASE;
+}
+
+static struct mlx5_irq_info *mlx5_irq_get(struct mlx5_core_dev *dev, int vecidx)
+{
+        struct mlx5_irq_table *irq_table = dev->priv.irq_table;
+
+        return &irq_table->irq_info[vecidx];
+}
+
+int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
+                       struct notifier_block *nb)
+{
+        struct mlx5_irq_info *irq_info;
+
+        irq_info = &irq_table->irq_info[vecidx];
+        return atomic_notifier_chain_register(&irq_info->nh, nb);
+}
+
+int mlx5_irq_detach_nb(struct mlx5_irq_table *irq_table, int vecidx,
+                       struct notifier_block *nb)
+{
+        struct mlx5_irq_info *irq_info;
+
+        irq_info = &irq_table->irq_info[vecidx];
+        return atomic_notifier_chain_unregister(&irq_info->nh, nb);
+}
+
+static irqreturn_t mlx5_irq_int_handler(int irq, void *nh)
+{
+        atomic_notifier_call_chain(nh, 0, NULL);
+        return IRQ_HANDLED;
+}
+
+static void irq_set_name(char *name, int vecidx)
+{
+        switch (vecidx) {
+        case MLX5_EQ_CMD_IDX:
+                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_cmd_eq");
+                break;
+        case MLX5_EQ_ASYNC_IDX:
+                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async_eq");
+                break;
+        case MLX5_EQ_PAGEREQ_IDX:
+                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_pages_eq");
+                break;
+        case MLX5_EQ_PFAULT_IDX:
+                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_ib_page_fault_eq");
+                break;
+        default:
+                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
+                         vecidx - MLX5_EQ_VEC_COMP_BASE);
+                break;
+        }
+}
+
+static int request_irqs(struct mlx5_core_dev *dev, int nvec)
+{
+        char name[MLX5_MAX_IRQ_NAME];
+        int err;
+        int i;
+
+        for (i = 0; i < nvec; i++) {
+                struct mlx5_irq_info *irq_info = mlx5_irq_get(dev, i);
+                int irqn = pci_irq_vector(dev->pdev, i);
+
+                irq_set_name(name, i);
+                ATOMIC_INIT_NOTIFIER_HEAD(&irq_info->nh);
+                snprintf(irq_info->name, MLX5_MAX_IRQ_NAME,
+                         "%s@pci:%s", name, pci_name(dev->pdev));
+                err = request_irq(irqn, mlx5_irq_int_handler, 0, irq_info->name,
+                                  &irq_info->nh);
+                if (err) {
+                        mlx5_core_err(dev, "Failed to request irq\n");
+                        goto err_request_irq;
+                }
+        }
+        return 0;
+
+err_request_irq:
+        for (; i >= 0; i--) {
+                struct mlx5_irq_info *irq_info = mlx5_irq_get(dev, i);
+                int irqn = pci_irq_vector(dev->pdev, i);
+
+                free_irq(irqn, &irq_info->nh);
+        }
+        return err;
+}
+
+static void irq_clear_rmap(struct mlx5_core_dev *dev)
+{
+#ifdef CONFIG_RFS_ACCEL
+        struct mlx5_irq_table *irq_table = dev->priv.irq_table;
+
+        free_irq_cpu_rmap(irq_table->rmap);
+#endif
+}
+
+static int irq_set_rmap(struct mlx5_core_dev *mdev)
+{
+        int err = 0;
+#ifdef CONFIG_RFS_ACCEL
+        struct mlx5_irq_table *irq_table = mdev->priv.irq_table;
+        int num_affinity_vec;
+        int vecidx;
+
+        num_affinity_vec = mlx5_irq_get_num_comp(irq_table);
+        irq_table->rmap = alloc_irq_cpu_rmap(num_affinity_vec);
+        if (!irq_table->rmap) {
+                err = -ENOMEM;
+                mlx5_core_err(mdev, "Failed to allocate cpu_rmap. err %d", err);
+                goto err_out;
+        }
+
+        vecidx = MLX5_EQ_VEC_COMP_BASE;
+        for (; vecidx < irq_table->nvec; vecidx++) {
+                err = irq_cpu_rmap_add(irq_table->rmap,
+                                       pci_irq_vector(mdev->pdev, vecidx));
+                if (err) {
+                        mlx5_core_err(mdev, "irq_cpu_rmap_add failed. err %d",
+                                      err);
+                        goto err_irq_cpu_rmap_add;
+                }
+        }
+        return 0;
+
+err_irq_cpu_rmap_add:
+        irq_clear_rmap(mdev);
+err_out:
+#endif
+        return err;
+}
+
+/* Completion IRQ vectors */
+
+static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+        int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
+        struct mlx5_irq_info *irq_info;
+        int irq;
+
+        irq_info = mlx5_irq_get(mdev, vecidx);
+        irq = pci_irq_vector(mdev->pdev, vecidx);
+        if (!zalloc_cpumask_var(&irq_info->mask, GFP_KERNEL)) {
+                mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
+                return -ENOMEM;
+        }
+
+        cpumask_set_cpu(cpumask_local_spread(i, mdev->priv.numa_node),
+                        irq_info->mask);
+
+        if (IS_ENABLED(CONFIG_SMP) &&
+            irq_set_affinity_hint(irq, irq_info->mask))
+                mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x",
+                               irq);
+
+        return 0;
+}
+
+static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+        int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
+        struct mlx5_irq_info *irq_info;
+        int irq;
+
+        irq_info = mlx5_irq_get(mdev, vecidx);
+        irq = pci_irq_vector(mdev->pdev, vecidx);
+        irq_set_affinity_hint(irq, NULL);
+        free_cpumask_var(irq_info->mask);
+}
+
+static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev)
+{
+        int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
+        int err;
+        int i;
+
+        for (i = 0; i < nvec; i++) {
+                err = set_comp_irq_affinity_hint(mdev, i);
+                if (err)
+                        goto err_out;
+        }
+
+        return 0;
+
+err_out:
+        for (i--; i >= 0; i--)
+                clear_comp_irq_affinity_hint(mdev, i);
+
+        return err;
+}
+
+static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev)
+{
+        int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
+        int i;
+
+        for (i = 0; i < nvec; i++)
+                clear_comp_irq_affinity_hint(mdev, i);
+}
+
+struct cpumask *
+mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx)
+{
+        return irq_table->irq_info[vecidx].mask;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *irq_table)
+{
+        return irq_table->rmap;
+}
+#endif
+
+static void unrequest_irqs(struct mlx5_core_dev *dev)
+{
+        struct mlx5_irq_table *table = dev->priv.irq_table;
+        int i;
+
+        for (i = 0; i < table->nvec; i++)
+                free_irq(pci_irq_vector(dev->pdev, i),
+                         &mlx5_irq_get(dev, i)->nh);
+}
+
+int mlx5_irq_table_create(struct mlx5_core_dev *dev)
+{
+        struct mlx5_priv *priv = &dev->priv;
+        struct mlx5_irq_table *table = priv->irq_table;
+        int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
+                      MLX5_CAP_GEN(dev, max_num_eqs) :
+                      1 << MLX5_CAP_GEN(dev, log_max_eq);
+        int nvec;
+        int err;
+
+        nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
+               MLX5_EQ_VEC_COMP_BASE;
+        nvec = min_t(int, nvec, num_eqs);
+        if (nvec <= MLX5_EQ_VEC_COMP_BASE)
+                return -ENOMEM;
+
+        table->irq_info = kcalloc(nvec, sizeof(*table->irq_info), GFP_KERNEL);
+        if (!table->irq_info)
+                return -ENOMEM;
+
+        nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_EQ_VEC_COMP_BASE + 1,
+                                     nvec, PCI_IRQ_MSIX);
+        if (nvec < 0) {
+                err = nvec;
+                goto err_free_irq_info;
+        }
+
+        table->nvec = nvec;
+
+        err = irq_set_rmap(dev);
+        if (err)
+                goto err_set_rmap;
+
+        err = request_irqs(dev, nvec);
+        if (err)
+                goto err_request_irqs;
+
+        err = set_comp_irq_affinity_hints(dev);
+        if (err) {
+                mlx5_core_err(dev, "Failed to alloc affinity hint cpumask\n");
+                goto err_set_affinity;
+        }
+
+        return 0;
+
+err_set_affinity:
+        unrequest_irqs(dev);
+err_request_irqs:
+        irq_clear_rmap(dev);
+err_set_rmap:
+        pci_free_irq_vectors(dev->pdev);
+err_free_irq_info:
+        kfree(table->irq_info);
+        return err;
+}
+
+void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
+{
+        struct mlx5_irq_table *table = dev->priv.irq_table;
+        int i;
+
+        /* free_irq requires that affinity and rmap will be cleared
+         * before calling it. This is why there is asymmetry with set_rmap
+         * which should be called after alloc_irq but before request_irq.
+         */
+        irq_clear_rmap(dev);
+        clear_comp_irqs_affinity_hints(dev);
+        for (i = 0; i < table->nvec; i++)
+                free_irq(pci_irq_vector(dev->pdev, i),
+                         &mlx5_irq_get(dev, i)->nh);
+        pci_free_irq_vectors(dev->pdev);
+        kfree(table->irq_info);
+}
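Taken together, the four table entry points pair up across the device lifecycle: init/cleanup bracket the table allocation, create/destroy bracket the MSI-X vectors, rmap, IRQ requests, and affinity hints. A sketch of the intended ordering, with hypothetical demo_probe()/demo_remove() call sites (the real callers live elsewhere in the driver and are not part of this diff):

static int demo_probe(struct mlx5_core_dev *dev)
{
        int err;

        err = mlx5_irq_table_init(dev);         /* allocate the table */
        if (err)
                return err;

        /* MSI-X vectors, rmap, request_irq, affinity hints, in that order. */
        err = mlx5_irq_table_create(dev);
        if (err)
                goto err_cleanup;

        return 0;

err_cleanup:
        mlx5_irq_table_cleanup(dev);
        return err;
}

static void demo_remove(struct mlx5_core_dev *dev)
{
        /* Clears the rmap and affinity hints before free_irq, per the
         * ordering comment in mlx5_irq_table_destroy() above.
         */
        mlx5_irq_table_destroy(dev);
        mlx5_irq_table_cleanup(dev);
}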