PCI/MSI: Use msi_add_msi_desc()

Simplify the allocation of MSI descriptors by using msi_add_msi_desc()
which moves the storage handling to core code and prepares for dynamic
extension of the MSI-X vector space.
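
The pattern this converts to, in rough outline: instead of allocating a
descriptor with alloc_msi_entry() and adding it to the device's MSI list by
hand, the PCI code fills a template struct msi_desc on the stack and hands it
to msi_add_msi_desc(), which allocates and stores the real descriptor in core
code. A minimal sketch of that calling pattern (illustration only, not the
full function from the diff below; the helper name example_setup_msi_desc is
made up, and most of the field setup done by the real code is omitted):

  static int example_setup_msi_desc(struct pci_dev *dev, int nvec,
  				    struct irq_affinity_desc *masks)
  {
  	struct msi_desc desc;

  	/* Template lives on the stack; core code copies it */
  	memset(&desc, 0, sizeof(desc));
  	desc.nvec_used = nvec;			/* vectors backed by this entry */
  	desc.pci.msi_attrib.default_irq = dev->irq;
  	desc.affinity = masks;

  	/* Allocate and link the descriptor in MSI core code */
  	return msi_add_msi_desc(&dev->dev, &desc);
  }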

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Nishanth Menon <nm@ti.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Bjorn Helgaas <bhelgaas@google.com>
Link: https://lore.kernel.org/r/20211206210748.035348646@linutronix.de
Author: Thomas Gleixner <tglx@linutronix.de>
Date:   2021-12-06 23:51:15 +01:00
Commit: 71020a3c0d (parent: 5512c5eaf5)

 1 file changed, 50 insertions(+), 60 deletions(-)


@@ -376,40 +376,41 @@ static int pci_setup_msi_context(struct pci_dev *dev)
 	return ret;
 }
 
-static struct msi_desc *
-msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks)
+static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
+			      struct irq_affinity_desc *masks)
 {
-	struct msi_desc *entry;
+	struct msi_desc desc;
 	u16 control;
 
 	/* MSI Entry Initialization */
-	entry = alloc_msi_entry(&dev->dev, nvec, masks);
-	if (!entry)
-		return NULL;
+	memset(&desc, 0, sizeof(desc));
 
 	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
 	/* Lies, damned lies, and MSIs */
 	if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
 		control |= PCI_MSI_FLAGS_MASKBIT;
+	/* Respect XEN's mask disabling */
+	if (pci_msi_ignore_mask)
+		control &= ~PCI_MSI_FLAGS_MASKBIT;
 
-	entry->pci.msi_attrib.is_64		= !!(control & PCI_MSI_FLAGS_64BIT);
-	entry->pci.msi_attrib.can_mask		= !pci_msi_ignore_mask &&
-						  !!(control & PCI_MSI_FLAGS_MASKBIT);
-	entry->pci.msi_attrib.default_irq	= dev->irq;
-	entry->pci.msi_attrib.multi_cap		= (control & PCI_MSI_FLAGS_QMASK) >> 1;
-	entry->pci.msi_attrib.multiple		= ilog2(__roundup_pow_of_two(nvec));
+	desc.nvec_used			= nvec;
+	desc.pci.msi_attrib.is_64	= !!(control & PCI_MSI_FLAGS_64BIT);
+	desc.pci.msi_attrib.can_mask	= !!(control & PCI_MSI_FLAGS_MASKBIT);
+	desc.pci.msi_attrib.default_irq	= dev->irq;
+	desc.pci.msi_attrib.multi_cap	= (control & PCI_MSI_FLAGS_QMASK) >> 1;
+	desc.pci.msi_attrib.multiple	= ilog2(__roundup_pow_of_two(nvec));
+	desc.affinity			= masks;
 
 	if (control & PCI_MSI_FLAGS_64BIT)
-		entry->pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
+		desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
 	else
-		entry->pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
+		desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
 
 	/* Save the initial mask status */
-	if (entry->pci.msi_attrib.can_mask)
-		pci_read_config_dword(dev, entry->pci.mask_pos, &entry->pci.msi_mask);
+	if (desc.pci.msi_attrib.can_mask)
+		pci_read_config_dword(dev, desc.pci.mask_pos, &desc.pci.msi_mask);
 
-	return entry;
+	return msi_add_msi_desc(&dev->dev, &desc);
 }
 
 static int msi_verify_entries(struct pci_dev *dev)
@@ -459,17 +460,14 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
 	masks = irq_create_affinity_masks(nvec, affd);
 
 	msi_lock_descs(&dev->dev);
-	entry = msi_setup_entry(dev, nvec, masks);
-	if (!entry) {
-		ret = -ENOMEM;
+	ret = msi_setup_msi_desc(dev, nvec, masks);
+	if (ret)
 		goto fail;
-	}
 
 	/* All MSIs are unmasked by default; mask them all */
+	entry = first_pci_msi_entry(dev);
 	pci_msi_mask(entry, msi_multi_mask(entry));
 
-	list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
-
 	/* Configure MSI capability structure */
 	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
 	if (ret)
@@ -519,48 +517,40 @@ static void __iomem *msix_map_region(struct pci_dev *dev,
 	return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
 }
 
-static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
-			      struct msix_entry *entries, int nvec,
-			      struct irq_affinity_desc *masks)
+static int msix_setup_msi_descs(struct pci_dev *dev, void __iomem *base,
+				struct msix_entry *entries, int nvec,
+				struct irq_affinity_desc *masks)
 {
-	int i, vec_count = pci_msix_vec_count(dev);
+	int ret = 0, i, vec_count = pci_msix_vec_count(dev);
 	struct irq_affinity_desc *curmsk;
-	struct msi_desc *entry;
+	struct msi_desc desc;
 	void __iomem *addr;
 
-	for (i = 0, curmsk = masks; i < nvec; i++) {
-		entry = alloc_msi_entry(&dev->dev, 1, curmsk);
-		if (!entry) {
-			/* No enough memory. Don't try again */
-			return -ENOMEM;
+	memset(&desc, 0, sizeof(desc));
+
+	desc.nvec_used			= 1;
+	desc.pci.msi_attrib.is_msix	= 1;
+	desc.pci.msi_attrib.is_64	= 1;
+	desc.pci.msi_attrib.default_irq	= dev->irq;
+	desc.pci.mask_base		= base;
+
+	for (i = 0, curmsk = masks; i < nvec; i++, curmsk++) {
+		desc.msi_index = entries ? entries[i].entry : i;
+		desc.affinity = masks ? curmsk : NULL;
+		desc.pci.msi_attrib.is_virtual = desc.msi_index >= vec_count;
+		desc.pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
+					       !desc.pci.msi_attrib.is_virtual;
+
+		if (!desc.pci.msi_attrib.can_mask) {
+			addr = pci_msix_desc_addr(&desc);
+			desc.pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
 		}
 
-		entry->pci.msi_attrib.is_msix	= 1;
-		entry->pci.msi_attrib.is_64	= 1;
-
-		if (entries)
-			entry->msi_index = entries[i].entry;
-		else
-			entry->msi_index = i;
-
-		entry->pci.msi_attrib.is_virtual = entry->msi_index >= vec_count;
-
-		entry->pci.msi_attrib.can_mask	= !pci_msi_ignore_mask &&
-						  !entry->pci.msi_attrib.is_virtual;
-
-		entry->pci.msi_attrib.default_irq	= dev->irq;
-		entry->pci.mask_base		= base;
-
-		if (entry->pci.msi_attrib.can_mask) {
-			addr = pci_msix_desc_addr(entry);
-			entry->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
-		}
-
-		list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
-		if (masks)
-			curmsk++;
+		ret = msi_add_msi_desc(&dev->dev, &desc);
+		if (ret)
+			break;
 	}
-	return 0;
+	return ret;
 }
 
 static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
@@ -598,7 +588,7 @@ static int msix_setup_interrupts(struct pci_dev *dev, void __iomem *base,
 	masks = irq_create_affinity_masks(nvec, affd);
 
 	msi_lock_descs(&dev->dev);
-	ret = msix_setup_entries(dev, base, entries, nvec, masks);
+	ret = msix_setup_msi_descs(dev, base, entries, nvec, masks);
 	if (ret)
 		goto out_free;
 