IB/hfi1: Make the MSIx resource allocation a bit more flexible
The current method of allocating MSIx resources is cumbersome and not easily extended. Refactor and reorder the code paths into a more consistent interface, and update the interface so that allocations are no longer order dependent.

Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Sadanand Warrier <sadanand.warrier@intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 09e71899b9
commit 6eb4eb10fb
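For orientation, here is a minimal sketch of how a caller is expected to use the reworked interface. The msix_* functions, struct hfi1_devdata / struct hfi1_ctxtdata, and the msix_intr field are taken from this patch (the first function mirrors the new set_up_interrupts() added in the chip code below); the example_* wrappers and their error handling are hypothetical and only illustrate the intended flow: size the vector pool once, then issue order-independent per-source requests.

/* Hypothetical bring-up sketch built on the reworked MSIx interface. */
static int example_msix_bringup(struct hfi1_devdata *dd)
{
    int ret;

    /* Size the vector pool and set up dd->msix_info (lock, bitmap). */
    ret = msix_initialize(dd);
    if (ret)
        return ret;

    /*
     * Request the general, SDMA and kernel receive-context IRQs.  Each
     * request takes a free vector from the in_use_msix bitmap, so the
     * individual requests are no longer order dependent.
     */
    ret = msix_request_irqs(dd);
    if (ret)
        msix_clean_up_interrupts(dd);

    return ret;
}

/* Hypothetical dynamic user (e.g. a VNIC context) coming and going later. */
static int example_dynamic_ctxt_irq(struct hfi1_devdata *dd,
                                    struct hfi1_ctxtdata *rcd)
{
    int ret = msix_request_rcd_irq(rcd);

    if (ret)
        return ret;

    /* ... context runs ... */

    msix_free_irq(dd, rcd->msix_intr);  /* returns the vector to the pool */
    return 0;
}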
@@ -817,10 +817,10 @@ static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
 	set = &entry->def_intr;
 	cpumask_set_cpu(cpu, &set->mask);
 	cpumask_set_cpu(cpu, &set->used);
 
-	for (i = 0; i < dd->num_msix_entries; i++) {
+	for (i = 0; i < dd->msix_info.max_requested; i++) {
 		struct hfi1_msix_entry *other_msix;
 
-		other_msix = &dd->msix_entries[i];
+		other_msix = &dd->msix_info.msix_entries[i];
 		if (other_msix->type != IRQ_SDMA || other_msix == msix)
 			continue;

@@ -13099,6 +13099,35 @@ void reset_interrupts(struct hfi1_devdata *dd)
 		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
 }
 
+/**
+ * set_up_interrupts() - Initialize the IRQ resources and state
+ * @dd: valid devdata
+ *
+ */
+static int set_up_interrupts(struct hfi1_devdata *dd)
+{
+	int ret;
+
+	/* mask all interrupts */
+	set_intr_state(dd, 0);
+	/* clear all pending interrupts */
+	clear_all_interrupts(dd);
+
+	/* reset general handler mask, chip MSI-X mappings */
+	reset_interrupts(dd);
+
+	/* ask for MSI-X interrupts */
+	ret = msix_initialize(dd);
+	if (ret)
+		return ret;
+
+	ret = msix_request_irqs(dd);
+	if (ret)
+		msix_clean_up_interrupts(dd);
+
+	return ret;
+}
+
 /*
  * Set up context values in dd. Sets:
  *

@@ -14966,7 +14995,7 @@ bail_free_cntrs:
 	free_cntrs(dd);
 bail_clear_intr:
 	hfi1_comp_vectors_clean_up(dd);
-	hfi1_clean_up_interrupts(dd);
+	msix_clean_up_interrupts(dd);
 bail_cleanup:
 	hfi1_pcie_ddcleanup(dd);
 bail_free:

@@ -668,6 +668,14 @@ struct hfi1_msix_entry {
 	struct irq_affinity_notify notify;
 };
 
+struct hfi1_msix_info {
+	/* lock to synchronize in_use_msix access */
+	spinlock_t msix_lock;
+	DECLARE_BITMAP(in_use_msix, CCE_NUM_MSIX_VECTORS);
+	struct hfi1_msix_entry *msix_entries;
+	u16 max_requested;
+};
+
 /* per-SL CCA information */
 struct cca_timer {
 	struct hrtimer hrtimer;

@@ -993,7 +1001,6 @@ struct hfi1_vnic_data {
 	struct idr vesw_idr;
 	u8 rmt_start;
 	u8 num_ctxt;
-	u32 msix_idx;
 };
 
 struct hfi1_vnic_vport_info;

@@ -1207,9 +1214,7 @@ struct hfi1_devdata {
 	struct diag_client *diag_client;
 
 	/* MSI-X information */
-	struct hfi1_msix_entry *msix_entries;
-	u32 num_msix_entries;
-	u32 first_dyn_msix_idx;
+	struct hfi1_msix_info msix_info;
 
 	/* general interrupt: mask of handled interrupts */
 	u64 gi_mask[CCE_NUM_INT_CSRS];

@@ -1052,7 +1052,7 @@ static void shutdown_device(struct hfi1_devdata *dd)
 
 	/* mask and clean up interrupts, but not errors */
 	set_intr_state(dd, 0);
-	hfi1_clean_up_interrupts(dd);
+	msix_clean_up_interrupts(dd);
 
 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
 		ppd = dd->pport + pidx;

@@ -1738,7 +1738,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
 
 	if (initfail || ret) {
-		hfi1_clean_up_interrupts(dd);
+		msix_clean_up_interrupts(dd);
 		stop_timers(dd);
 		flush_workqueue(ib_wq);
 		for (pidx = 0; pidx < dd->num_pports; ++pidx) {

@@ -47,284 +47,301 @@
  */
 
 #include "hfi.h"
 #include "affinity.h"
 #include "sdma.h"
 
-/*
- * Returns:
- *	- actual number of interrupts allocated or
- *	- error
+/**
+ * msix_initialize() - Calculate, request and configure MSIx IRQs
+ * @dd: valid hfi1 devdata
+ *
  */
-int request_msix(struct hfi1_devdata *dd, u32 msireq)
-{
-	int nvec;
-
-	nvec = pci_alloc_irq_vectors(dd->pcidev, msireq, msireq, PCI_IRQ_MSIX);
-	if (nvec < 0) {
-		dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", nvec);
-		return nvec;
-	}
-
-	return nvec;
-}
-
-int set_up_interrupts(struct hfi1_devdata *dd)
+int msix_initialize(struct hfi1_devdata *dd)
 {
 	u32 total;
-	int ret, request;
+	int ret;
+	struct hfi1_msix_entry *entries;
 
 	/*
-	 * Interrupt count:
-	 *	1 general, "slow path" interrupt (includes the SDMA engines
-	 *		slow source, SDMACleanupDone)
-	 *	N interrupts - one per used SDMA engine
-	 *	M interrupt - one per kernel receive context
-	 *	V interrupt - one for each VNIC context
+	 * MSIx interrupt count:
+	 *	one for the general, "slow path" interrupt
+	 *	one per used SDMA engine
+	 *	one per kernel receive context
+	 *	one for each VNIC context
+	 *	...any new IRQs should be added here.
 	 */
 	total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
 
-	/* ask for MSI-X interrupts */
-	request = request_msix(dd, total);
-	if (request < 0) {
-		ret = request;
-		goto fail;
-	} else {
-		dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
-					   GFP_KERNEL);
-		if (!dd->msix_entries) {
-			ret = -ENOMEM;
-			goto fail;
-		}
-		/* using MSI-X */
-		dd->num_msix_entries = total;
-		dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
+	if (total >= CCE_NUM_MSIX_VECTORS)
+		return -EINVAL;
+
+	ret = pci_alloc_irq_vectors(dd->pcidev, total, total, PCI_IRQ_MSIX);
+	if (ret < 0) {
+		dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", ret);
+		return ret;
 	}
 
-	/* mask all interrupts */
-	set_intr_state(dd, 0);
-	/* clear all pending interrupts */
-	clear_all_interrupts(dd);
+	entries = kcalloc(total, sizeof(*dd->msix_info.msix_entries),
+			  GFP_KERNEL);
+	if (!entries) {
+		pci_free_irq_vectors(dd->pcidev);
+		return -ENOMEM;
+	}
 
-	/* reset general handler mask, chip MSI-X mappings */
-	reset_interrupts(dd);
-
-	ret = request_msix_irqs(dd);
-	if (ret)
-		goto fail;
+	dd->msix_info.msix_entries = entries;
+	spin_lock_init(&dd->msix_info.msix_lock);
+	bitmap_zero(dd->msix_info.in_use_msix, total);
+	dd->msix_info.max_requested = total;
+	dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
 
 	return 0;
-
-fail:
-	hfi1_clean_up_interrupts(dd);
-	return ret;
 }
 
-int request_msix_irqs(struct hfi1_devdata *dd)
+/**
+ * msix_request_irq() - Allocate a free MSIx IRQ
+ * @dd: valid devdata
+ * @arg: context information for the IRQ
+ * @handler: IRQ handler
+ * @thread: IRQ thread handler (could be NULL)
+ * @idx: zero base idx if multiple devices are needed
+ * @type: affinty IRQ type
+ *
+ * Allocated an MSIx vector if available, and then create the appropriate
+ * meta data needed to keep track of the pci IRQ request.
+ *
+ * Return:
+ *   < 0   Error
+ *   >= 0  MSIx vector
+ *
+ */
+static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
+			    irq_handler_t handler, irq_handler_t thread,
+			    u32 idx, enum irq_type type)
 {
-	int first_general, last_general;
-	int first_sdma, last_sdma;
-	int first_rx, last_rx;
-	int i, ret = 0;
+	unsigned long nr;
+	int irq;
+	int ret;
+	const char *err_info;
+	char name[MAX_NAME_SIZE];
+	struct hfi1_msix_entry *me;
 
-	/* calculate the ranges we are going to use */
-	first_general = 0;
-	last_general = first_general + 1;
-	first_sdma = last_general;
-	last_sdma = first_sdma + dd->num_sdma;
-	first_rx = last_sdma;
-	last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
+	/* Allocate an MSIx vector */
+	spin_lock(&dd->msix_info.msix_lock);
+	nr = find_first_zero_bit(dd->msix_info.in_use_msix,
+				 dd->msix_info.max_requested);
+	if (nr < dd->msix_info.max_requested)
+		__set_bit(nr, dd->msix_info.in_use_msix);
+	spin_unlock(&dd->msix_info.msix_lock);
 
-	/* VNIC MSIx interrupts get mapped when VNIC contexts are created */
-	dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
+	if (nr == dd->msix_info.max_requested)
+		return -ENOSPC;
+
+	/* Specific verification and determine the name */
+	switch (type) {
+	case IRQ_GENERAL:
+		/* general interrupt must be MSIx vector 0 */
+		if (nr) {
+			spin_lock(&dd->msix_info.msix_lock);
+			__clear_bit(nr, dd->msix_info.in_use_msix);
+			spin_unlock(&dd->msix_info.msix_lock);
+			dd_dev_err(dd, "Invalid index %lu for GENERAL IRQ\n",
+				   nr);
+			return -EINVAL;
+		}
+		snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit);
+		err_info = "general";
+		break;
+	case IRQ_SDMA:
+		snprintf(name, sizeof(name), DRIVER_NAME "_%d sdma%d",
+			 dd->unit, idx);
+		err_info = "sdma";
+		break;
+	case IRQ_RCVCTXT:
+		snprintf(name, sizeof(name), DRIVER_NAME "_%d kctxt%d",
+			 dd->unit, idx);
+		err_info = "receive context";
+		break;
+	case IRQ_OTHER:
+	default:
+		return -EINVAL;
+	}
+	name[sizeof(name) - 1] = 0;
+
+	irq = pci_irq_vector(dd->pcidev, nr);
+	ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name);
+	if (ret) {
+		dd_dev_err(dd,
+			   "%s: request for IRQ %d failed, MSIx %d, err %d\n",
+			   err_info, irq, idx, ret);
+		spin_lock(&dd->msix_info.msix_lock);
+		__clear_bit(nr, dd->msix_info.in_use_msix);
+		spin_unlock(&dd->msix_info.msix_lock);
+		return ret;
+	}
 
 	/*
-	 * Sanity check - the code expects all SDMA chip source
-	 * interrupts to be in the same CSR, starting at bit 0. Verify
-	 * that this is true by checking the bit location of the start.
+	 * assign arg after pci_request_irq call, so it will be
+	 * cleaned up
 	 */
-	BUILD_BUG_ON(IS_SDMA_START % 64);
+	me = &dd->msix_info.msix_entries[nr];
+	me->irq = irq;
+	me->arg = arg;
+	me->type = type;
 
-	for (i = 0; i < dd->num_msix_entries; i++) {
-		struct hfi1_msix_entry *me = &dd->msix_entries[i];
-		const char *err_info;
-		irq_handler_t handler;
-		irq_handler_t thread = NULL;
-		void *arg = NULL;
-		int idx;
-		struct hfi1_ctxtdata *rcd = NULL;
-		struct sdma_engine *sde = NULL;
-		char name[MAX_NAME_SIZE];
+	/* This is a request, so a failure is not fatal */
+	ret = hfi1_get_irq_affinity(dd, me);
+	if (ret)
+		dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
 
-		/* obtain the arguments to pci_request_irq */
-		if (first_general <= i && i < last_general) {
-			idx = i - first_general;
-			handler = general_interrupt;
-			arg = dd;
-			snprintf(name, sizeof(name),
-				 DRIVER_NAME "_%d", dd->unit);
-			err_info = "general";
-			me->type = IRQ_GENERAL;
-		} else if (first_sdma <= i && i < last_sdma) {
-			idx = i - first_sdma;
-			sde = &dd->per_sdma[idx];
-			handler = sdma_interrupt;
-			arg = sde;
-			snprintf(name, sizeof(name),
-				 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
-			err_info = "sdma";
-			remap_sdma_interrupts(dd, idx, i);
-			me->type = IRQ_SDMA;
-		} else if (first_rx <= i && i < last_rx) {
-			idx = i - first_rx;
-			rcd = hfi1_rcd_get_by_index_safe(dd, idx);
-			if (rcd) {
-				/*
-				 * Set the interrupt register and mask for this
-				 * context's interrupt.
-				 */
-				rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
-				rcd->imask = ((u64)1) <<
-					((IS_RCVAVAIL_START + idx) % 64);
-				handler = receive_context_interrupt;
-				thread = receive_context_thread;
-				arg = rcd;
-				snprintf(name, sizeof(name),
-					 DRIVER_NAME "_%d kctxt%d",
-					 dd->unit, idx);
-				err_info = "receive context";
-				remap_intr(dd, IS_RCVAVAIL_START + idx, i);
-				me->type = IRQ_RCVCTXT;
-				rcd->msix_intr = i;
-				hfi1_rcd_put(rcd);
-			}
-		} else {
-			/* not in our expected range - complain, then
-			 * ignore it
-			 */
-			dd_dev_err(dd,
-				   "Unexpected extra MSI-X interrupt %d\n", i);
-			continue;
-		}
-		/* no argument, no interrupt */
-		if (!arg)
-			continue;
-		/* make sure the name is terminated */
-		name[sizeof(name) - 1] = 0;
-		me->irq = pci_irq_vector(dd->pcidev, i);
-		ret = pci_request_irq(dd->pcidev, i, handler, thread, arg,
-				      name);
-		if (ret) {
-			dd_dev_err(dd,
-				   "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
-				   err_info, me->irq, idx, ret);
-			return ret;
-		}
-		/*
-		 * assign arg after pci_request_irq call, so it will be
-		 * cleaned up
-		 */
-		me->arg = arg;
-
-		ret = hfi1_get_irq_affinity(dd, me);
-		if (ret)
-			dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
-	}
 
-	return ret;
+	return nr;
 }
 
-void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
+/**
+ * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs
+ * @rcd: valid rcd context
+ *
+ */
+int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
 {
-	int i;
+	int nr;
 
-	for (i = 0; i < dd->vnic.num_ctxt; i++) {
-		struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
-		struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
-
-		synchronize_irq(me->irq);
-	}
-}
-
-void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
-{
-	struct hfi1_devdata *dd = rcd->dd;
-	struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
-
-	if (!me->arg) /* => no irq, no affinity */
-		return;
-
-	hfi1_put_irq_affinity(dd, me);
-	pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
-
-	me->arg = NULL;
-}
-
-void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
-{
-	struct hfi1_devdata *dd = rcd->dd;
-	struct hfi1_msix_entry *me;
-	int idx = rcd->ctxt;
-	void *arg = rcd;
-	int ret;
-
-	rcd->msix_intr = dd->vnic.msix_idx++;
-	me = &dd->msix_entries[rcd->msix_intr];
+	nr = msix_request_irq(rcd->dd, rcd, receive_context_interrupt,
+			      receive_context_thread, rcd->ctxt, IRQ_RCVCTXT);
+	if (nr < 0)
+		return nr;
 
 	/*
 	 * Set the interrupt register and mask for this
 	 * context's interrupt.
 	 */
-	rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
-	rcd->imask = ((u64)1) <<
-		((IS_RCVAVAIL_START + idx) % 64);
-	me->type = IRQ_RCVCTXT;
-	me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
-	remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
+	rcd->ireg = (IS_RCVAVAIL_START + rcd->ctxt) / 64;
+	rcd->imask = ((u64)1) << ((IS_RCVAVAIL_START + rcd->ctxt) % 64);
+	rcd->msix_intr = nr;
+	remap_intr(rcd->dd, IS_RCVAVAIL_START + rcd->ctxt, nr);
 
-	ret = pci_request_irq(dd->pcidev, rcd->msix_intr,
-			      receive_context_interrupt,
-			      receive_context_thread, arg,
-			      DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
-	if (ret) {
-		dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
-			   me->irq, idx, ret);
-		return;
-	}
-	/*
-	 * assign arg after pci_request_irq call, so it will be
-	 * cleaned up
-	 */
-	me->arg = arg;
-
-	ret = hfi1_get_irq_affinity(dd, me);
-	if (ret) {
-		dd_dev_err(dd,
-			   "unable to pin IRQ %d\n", ret);
-		pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
-	}
+	return 0;
 }
 
 /**
- * hfi1_clean_up_interrupts() - Free all IRQ resources
+ * msix_request_smda_ira() - Helper for getting SDMA IRQ resources
+ * @sde: valid sdma engine
+ *
+ */
+int msix_request_sdma_irq(struct sdma_engine *sde)
+{
+	int nr;
+
+	nr = msix_request_irq(sde->dd, sde, sdma_interrupt, NULL,
+			      sde->this_idx, IRQ_SDMA);
+	if (nr < 0)
+		return nr;
+	sde->msix_intr = nr;
+	remap_sdma_interrupts(sde->dd, sde->this_idx, nr);
+
+	return 0;
+}
+
+/**
+ * msix_request_irqs() - Allocate all MSIx IRQs
+ * @dd: valid devdata structure
+ *
+ * Helper function to request the used MSIx IRQs.
+ *
+ */
+int msix_request_irqs(struct hfi1_devdata *dd)
+{
+	int i;
+	int ret;
+
+	ret = msix_request_irq(dd, dd, general_interrupt, NULL, 0, IRQ_GENERAL);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < dd->num_sdma; i++) {
+		struct sdma_engine *sde = &dd->per_sdma[i];
+
+		ret = msix_request_sdma_irq(sde);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < dd->n_krcv_queues; i++) {
+		struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index_safe(dd, i);
+
+		if (rcd)
+			ret = msix_request_rcd_irq(rcd);
+		hfi1_rcd_put(rcd);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * msix_free_irq() - Free the specified MSIx resources and IRQ
+ * @dd: valid devdata
+ * @msix_intr: MSIx vector to free.
+ *
+ */
+void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr)
+{
+	struct hfi1_msix_entry *me;
+
+	if (msix_intr >= dd->msix_info.max_requested)
+		return;
+
+	me = &dd->msix_info.msix_entries[msix_intr];
+
+	if (!me->arg) /* => no irq, no affinity */
+		return;
+
+	hfi1_put_irq_affinity(dd, me);
+	pci_free_irq(dd->pcidev, msix_intr, me->arg);
+
+	me->arg = NULL;
+
+	spin_lock(&dd->msix_info.msix_lock);
+	__clear_bit(msix_intr, dd->msix_info.in_use_msix);
+	spin_unlock(&dd->msix_info.msix_lock);
+}
+
+/**
+ * hfi1_clean_up_msix_interrupts() - Free all MSIx IRQ resources
  * @dd: valid device data data structure
  *
  * Free the MSIx and associated PCI resources, if they have been allocated.
  */
-void hfi1_clean_up_interrupts(struct hfi1_devdata *dd)
+void msix_clean_up_interrupts(struct hfi1_devdata *dd)
 {
 	int i;
-	struct hfi1_msix_entry *me = dd->msix_entries;
+	struct hfi1_msix_entry *me = dd->msix_info.msix_entries;
 
 	/* remove irqs - must happen before disabling/turning off */
-	for (i = 0; i < dd->num_msix_entries; i++, me++) {
-		if (!me->arg) /* => no irq, no affinity */
-			continue;
-		hfi1_put_irq_affinity(dd, me);
-		pci_free_irq(dd->pcidev, i, me->arg);
-	}
+	for (i = 0; i < dd->msix_info.max_requested; i++, me++)
+		msix_free_irq(dd, i);
 
 	/* clean structures */
-	kfree(dd->msix_entries);
-	dd->msix_entries = NULL;
-	dd->num_msix_entries = 0;
+	kfree(dd->msix_info.msix_entries);
+	dd->msix_info.msix_entries = NULL;
+	dd->msix_info.max_requested = 0;
 
 	pci_free_irq_vectors(dd->pcidev);
 }
+
+/**
+ * msix_vnic_syncrhonize_irq() - Vnic IRQ synchronize
+ * @dd: valid devdata
+ */
+void msix_vnic_synchronize_irq(struct hfi1_devdata *dd)
+{
+	int i;
+
+	for (i = 0; i < dd->vnic.num_ctxt; i++) {
+		struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
+		struct hfi1_msix_entry *me;
+
+		me = &dd->msix_info.msix_entries[rcd->msix_intr];
+
+		synchronize_irq(me->irq);
+	}
+}
 
@@ -51,14 +51,14 @@
 #include "hfi.h"
 
 /* MSIx interface */
-int request_msix(struct hfi1_devdata *dd, u32 msireq);
-int set_up_interrupts(struct hfi1_devdata *dd);
-int request_msix_irqs(struct hfi1_devdata *dd);
-void hfi1_clean_up_interrupts(struct hfi1_devdata *dd);
+int msix_initialize(struct hfi1_devdata *dd);
+int msix_request_irqs(struct hfi1_devdata *dd);
+void msix_clean_up_interrupts(struct hfi1_devdata *dd);
+int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd);
+int msix_request_sdma_irq(struct sdma_engine *sde);
+void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr);
 
 /* VNIC interface */
-void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd);
-void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd);
-void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd);
+void msix_vnic_synchronize_irq(struct hfi1_devdata *dd);
 
 #endif

@@ -405,6 +405,7 @@ struct sdma_engine {
 	struct list_head flushlist;
 	struct cpumask cpu_mask;
 	struct kobject kobj;
+	u32 msix_intr;
 };
 
 int sdma_init(struct hfi1_devdata *dd, u8 port);

@@ -120,7 +120,7 @@ static int allocate_vnic_ctxt(struct hfi1_devdata *dd,
 	uctxt->seq_cnt = 1;
 	uctxt->is_vnic = true;
 
-	hfi1_set_vnic_msix_info(uctxt);
+	msix_request_rcd_irq(uctxt);
 
 	hfi1_stats.sps_ctxts++;
 	dd_dev_dbg(dd, "created vnic context %d\n", uctxt->ctxt);

@@ -135,8 +135,6 @@ static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
 	dd_dev_dbg(dd, "closing vnic context %d\n", uctxt->ctxt);
 	flush_wc();
 
-	hfi1_reset_vnic_msix_info(uctxt);
-
 	/*
 	 * Disable receive context and interrupt available, reset all
 	 * RcvCtxtCtrl bits to default values.

@@ -148,6 +146,10 @@ static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
 		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
 		     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
 
+	/* msix_intr will always be > 0, only clean up if this is true */
+	if (uctxt->msix_intr)
+		msix_free_irq(dd, uctxt->msix_intr);
+
 	uctxt->event_flags = 0;
 
 	hfi1_clear_tids(uctxt);

@@ -626,7 +628,7 @@ static void hfi1_vnic_down(struct hfi1_vnic_vport_info *vinfo)
 	idr_remove(&dd->vnic.vesw_idr, vinfo->vesw_id);
 
 	/* ensure irqs see the change */
-	hfi1_vnic_synchronize_irq(dd);
+	msix_vnic_synchronize_irq(dd);
 
 	/* remove unread skbs */
 	for (i = 0; i < vinfo->num_rx_q; i++) {

@@ -690,8 +692,6 @@ static int hfi1_vnic_init(struct hfi1_vnic_vport_info *vinfo)
 		rc = hfi1_vnic_txreq_init(dd);
 		if (rc)
 			goto txreq_fail;
-
-		dd->vnic.msix_idx = dd->first_dyn_msix_idx;
 	}
 
 	for (i = dd->vnic.num_ctxt; i < vinfo->num_rx_q; i++) {