iommu/amd: Remove the special case from alloc_irq_table()
alloc_irq_table() has a special ioapic argument. If set, it will pre-allocate / reserve the first 32 indexes. The argument is true in only one call site, and alloc_irq_table() becomes a little simpler if we extract the special bits to that caller. The caller of irq_remapping_alloc() holds irq_domain_mutex, so the initialization via iommu->irte_ops->set_allocated() should not race against other users. Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
parent
ea6166f4b8
commit
fde65dd3d3
|
@ -3617,7 +3617,7 @@ static struct irq_remap_table *get_irq_table(u16 devid)
|
||||||
return table;
|
return table;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct irq_remap_table *alloc_irq_table(u16 devid, bool ioapic)
|
static struct irq_remap_table *alloc_irq_table(u16 devid)
|
||||||
{
|
{
|
||||||
struct irq_remap_table *table = NULL;
|
struct irq_remap_table *table = NULL;
|
||||||
struct amd_iommu *iommu;
|
struct amd_iommu *iommu;
|
||||||
|
@ -3651,10 +3651,6 @@ static struct irq_remap_table *alloc_irq_table(u16 devid, bool ioapic)
|
||||||
/* Initialize table spin-lock */
|
/* Initialize table spin-lock */
|
||||||
raw_spin_lock_init(&table->lock);
|
raw_spin_lock_init(&table->lock);
|
||||||
|
|
||||||
if (ioapic)
|
|
||||||
/* Keep the first 32 indexes free for IOAPIC interrupts */
|
|
||||||
table->min_index = 32;
|
|
||||||
|
|
||||||
table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
|
table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
|
||||||
if (!table->table) {
|
if (!table->table) {
|
||||||
kfree(table);
|
kfree(table);
|
||||||
|
@ -3669,12 +3665,6 @@ static struct irq_remap_table *alloc_irq_table(u16 devid, bool ioapic)
|
||||||
memset(table->table, 0,
|
memset(table->table, 0,
|
||||||
(MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
|
(MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
|
||||||
|
|
||||||
if (ioapic) {
|
|
||||||
int i;
|
|
||||||
|
|
||||||
for (i = 0; i < 32; ++i)
|
|
||||||
iommu->irte_ops->set_allocated(table, i);
|
|
||||||
}
|
|
||||||
|
|
||||||
irq_lookup_table[devid] = table;
|
irq_lookup_table[devid] = table;
|
||||||
set_dte_irq_entry(devid, table);
|
set_dte_irq_entry(devid, table);
|
||||||
|
@ -3704,7 +3694,7 @@ static int alloc_irq_index(u16 devid, int count, bool align)
|
||||||
if (!iommu)
|
if (!iommu)
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
|
|
||||||
table = alloc_irq_table(devid, false);
|
table = alloc_irq_table(devid);
|
||||||
if (!table)
|
if (!table)
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
|
|
||||||
|
@ -4130,10 +4120,26 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
|
if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
|
||||||
if (alloc_irq_table(devid, true))
|
struct irq_remap_table *table;
|
||||||
|
struct amd_iommu *iommu;
|
||||||
|
|
||||||
|
table = alloc_irq_table(devid);
|
||||||
|
if (table) {
|
||||||
|
if (!table->min_index) {
|
||||||
|
/*
|
||||||
|
* Keep the first 32 indexes free for IOAPIC
|
||||||
|
* interrupts.
|
||||||
|
*/
|
||||||
|
table->min_index = 32;
|
||||||
|
iommu = amd_iommu_rlookup_table[devid];
|
||||||
|
for (i = 0; i < 32; ++i)
|
||||||
|
iommu->irte_ops->set_allocated(table, i);
|
||||||
|
}
|
||||||
|
WARN_ON(table->min_index != 32);
|
||||||
index = info->ioapic_pin;
|
index = info->ioapic_pin;
|
||||||
else
|
} else {
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
|
bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue