PCI/MSI: Use msi_on_each_desc()

Use the new iterator functions which pave the way for dynamically extending
MSI-X vectors.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Nishanth Menon <nm@ti.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Bjorn Helgaas <bhelgaas@google.com>
Link: https://lore.kernel.org/r/20211206210748.142603657@linutronix.de
This commit is contained in:
Thomas Gleixner 2021-12-06 23:51:18 +01:00
parent 9fb9eb4b59
commit ae24e28fef
3 changed files with 24 additions and 29 deletions

View File

@@ -83,7 +83,7 @@ static int pci_msi_domain_check_cap(struct irq_domain *domain,
struct msi_domain_info *info,
struct device *dev)
{
struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev));
struct msi_desc *desc = msi_first_desc(dev, MSI_DESC_ALL);
/* Special handling to support __pci_enable_msi_range() */
if (pci_msi_desc_is_multi_msi(desc) &&
@@ -98,7 +98,7 @@ static int pci_msi_domain_check_cap(struct irq_domain *domain,
unsigned int idx = 0;
/* Check for gaps in the entry indices */
for_each_msi_entry(desc, dev) {
msi_for_each_desc(desc, dev, MSI_DESC_ALL) {
if (desc->msi_index != idx++)
return -ENOTSUPP;
}

View File

@@ -28,7 +28,7 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
for_each_pci_msi_entry(desc, dev) {
msi_for_each_desc(desc, &dev->dev, MSI_DESC_NOTASSOCIATED) {
ret = arch_setup_msi_irq(dev, desc);
if (ret)
return ret < 0 ? ret : -ENOSPC;
@@ -42,27 +42,24 @@ void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
struct msi_desc *desc;
int i;
for_each_pci_msi_entry(desc, dev) {
if (desc->irq) {
for (i = 0; i < desc->nvec_used; i++)
arch_teardown_msi_irq(desc->irq + i);
}
msi_for_each_desc(desc, &dev->dev, MSI_DESC_ASSOCIATED) {
for (i = 0; i < desc->nvec_used; i++)
arch_teardown_msi_irq(desc->irq + i);
}
}
static int pci_msi_setup_check_result(struct pci_dev *dev, int type, int ret)
{
struct msi_desc *entry;
struct msi_desc *desc;
int avail = 0;
if (type != PCI_CAP_ID_MSIX || ret >= 0)
return ret;
/* Scan the MSI descriptors for successfully allocated ones. */
for_each_pci_msi_entry(entry, dev) {
if (entry->irq != 0)
avail++;
}
msi_for_each_desc(desc, &dev->dev, MSI_DESC_ASSOCIATED)
avail++;
return avail ? avail : ret;
}

View File

@@ -297,7 +297,6 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
if (!dev->msix_enabled)
return;
BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
/* route the table */
pci_intx_for_msi(dev, 0);
@@ -307,7 +306,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
write_msg = arch_restore_msi_irqs(dev);
msi_lock_descs(&dev->dev);
for_each_pci_msi_entry(entry, dev) {
msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
if (write_msg)
__pci_write_msi_msg(entry, &entry->msg);
pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
@@ -406,14 +405,14 @@ static int msi_verify_entries(struct pci_dev *dev)
if (!dev->no_64bit_msi)
return 0;
for_each_pci_msi_entry(entry, dev) {
msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
if (entry->msg.address_hi) {
pci_err(dev, "arch assigned 64-bit MSI address %#x%08x but device only supports 32 bits\n",
entry->msg.address_hi, entry->msg.address_lo);
return -EIO;
break;
}
}
return 0;
return !entry ? 0 : -EIO;
}
/**
@@ -451,7 +450,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
goto fail;
/* All MSIs are unmasked by default; mask them all */
entry = first_pci_msi_entry(dev);
entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
pci_msi_mask(entry, msi_multi_mask(entry));
/* Configure MSI capability structure */
@@ -541,11 +540,11 @@ static int msix_setup_msi_descs(struct pci_dev *dev, void __iomem *base,
static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
{
struct msi_desc *entry;
struct msi_desc *desc;
if (entries) {
for_each_pci_msi_entry(entry, dev) {
entries->vector = entry->irq;
msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) {
entries->vector = desc->irq;
entries++;
}
}
@@ -747,15 +746,14 @@ static void pci_msi_shutdown(struct pci_dev *dev)
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
desc = first_pci_msi_entry(dev);
pci_msi_set_enable(dev, 0);
pci_intx_for_msi(dev, 1);
dev->msi_enabled = 0;
/* Return the device with MSI unmasked as initial states */
pci_msi_unmask(desc, msi_multi_mask(desc));
desc = msi_first_desc(&dev->dev, MSI_DESC_ALL);
if (!WARN_ON_ONCE(!desc))
pci_msi_unmask(desc, msi_multi_mask(desc));
/* Restore dev->irq to its default pin-assertion IRQ */
dev->irq = desc->pci.msi_attrib.default_irq;
@@ -831,7 +829,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
static void pci_msix_shutdown(struct pci_dev *dev)
{
struct msi_desc *entry;
struct msi_desc *desc;
if (!pci_msi_enable || !dev || !dev->msix_enabled)
return;
@@ -842,8 +840,8 @@ static void pci_msix_shutdown(struct pci_dev *dev)
}
/* Return the device with MSI-X masked as initial states */
for_each_pci_msi_entry(entry, dev)
pci_msix_mask(entry);
msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL)
pci_msix_mask(desc);
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
pci_intx_for_msi(dev, 1);