iommu/amd: Drop the lock while allocating new irq remap table
The irq_remap_table is allocated while the iommu_table_lock is held and interrupts are disabled. From looking at the call sites, all callers run during early device initialisation (apic_bsp_setup(), pci_enable_device(), pci_enable_msi()), so it makes sense to drop the lock, which also enables interrupts again, and allocate that memory with GFP_KERNEL instead of GFP_ATOMIC. Since the iommu_table_lock is dropped during the allocation, we need to recheck whether the table exists after the lock has been reacquired. I *think* it is impossible for the "devid" entry to appear in irq_lookup_table while the lock is dropped, since the same device can only be probed once; however, I check for both cases, just to be sure.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 2fcc1e8ac4
commit 993ca6e063
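The core of the change is a classic drop-the-lock, allocate, retake-and-recheck pattern. A minimal sketch of that pattern, stripped of the AMD IOMMU specifics (lookup[], MAX_DEVICES, make_table() and free_table() are illustrative placeholders, not the kernel's actual API):

	/*
	 * Sketch only: lookup[], make_table() and free_table() stand in
	 * for irq_lookup_table[] and the real allocation helpers.
	 */
	static struct irq_remap_table *lookup[MAX_DEVICES];
	static DEFINE_SPINLOCK(table_lock);

	static struct irq_remap_table *get_or_alloc_table(u16 devid)
	{
		struct irq_remap_table *table, *new_table = NULL;
		unsigned long flags;

		spin_lock_irqsave(&table_lock, flags);
		table = lookup[devid];
		if (table)
			goto out_unlock;

		/* Drop the lock; interrupts are enabled again, so a
		 * sleeping GFP_KERNEL allocation is now allowed. */
		spin_unlock_irqrestore(&table_lock, flags);

		new_table = make_table();	/* uses GFP_KERNEL internally */
		if (!new_table)
			return NULL;

		spin_lock_irqsave(&table_lock, flags);

		/* Recheck: another CPU may have installed a table while
		 * the lock was dropped. */
		table = lookup[devid];
		if (!table) {
			table = new_table;
			new_table = NULL;	/* ownership handed over */
			lookup[devid] = table;
		}

	out_unlock:
		spin_unlock_irqrestore(&table_lock, flags);

		/* Lost the race: discard the speculative allocation. */
		if (new_table)
			free_table(new_table);

		return table;
	}

Whichever path loses the race frees its speculative allocation after dropping the lock, exactly as alloc_irq_table() does with new_table in the diff below.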
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3617,6 +3617,30 @@ static struct irq_remap_table *get_irq_table(u16 devid)
 	return table;
 }
 
+static struct irq_remap_table *__alloc_irq_table(void)
+{
+	struct irq_remap_table *table;
+
+	table = kzalloc(sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return NULL;
+
+	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
+	if (!table->table) {
+		kfree(table);
+		return NULL;
+	}
+	raw_spin_lock_init(&table->lock);
+
+	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+		memset(table->table, 0,
+		       MAX_IRQS_PER_TABLE * sizeof(u32));
+	else
+		memset(table->table, 0,
+		       (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
+	return table;
+}
+
 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
 				  struct irq_remap_table *table)
 {
@@ -3628,6 +3652,7 @@ static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
 static struct irq_remap_table *alloc_irq_table(u16 devid)
 {
 	struct irq_remap_table *table = NULL;
+	struct irq_remap_table *new_table = NULL;
 	struct amd_iommu *iommu;
 	unsigned long flags;
 	u16 alias;
@@ -3646,42 +3671,44 @@ static struct irq_remap_table *alloc_irq_table(u16 devid)
 	table = irq_lookup_table[alias];
 	if (table) {
 		set_remap_table_entry(iommu, devid, table);
-		goto out;
+		goto out_wait;
 	}
+	spin_unlock_irqrestore(&iommu_table_lock, flags);
 
 	/* Nothing there yet, allocate new irq remapping table */
-	table = kzalloc(sizeof(*table), GFP_ATOMIC);
-	if (!table)
+	new_table = __alloc_irq_table();
+	if (!new_table)
+		return NULL;
+
+	spin_lock_irqsave(&iommu_table_lock, flags);
+
+	table = irq_lookup_table[devid];
+	if (table)
 		goto out_unlock;
 
-	/* Initialize table spin-lock */
-	raw_spin_lock_init(&table->lock);
+	table = irq_lookup_table[alias];
+	if (table) {
+		set_remap_table_entry(iommu, devid, table);
+		goto out_wait;
+	}
 
-	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
-	if (!table->table) {
-		kfree(table);
-		table = NULL;
-		goto out_unlock;
-	}
+	table = new_table;
+	new_table = NULL;
 
-	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
-		memset(table->table, 0,
-		       MAX_IRQS_PER_TABLE * sizeof(u32));
-	else
-		memset(table->table, 0,
-		       (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
-
 	set_remap_table_entry(iommu, devid, table);
 	if (devid != alias)
 		set_remap_table_entry(iommu, alias, table);
 
-out:
+out_wait:
 	iommu_completion_wait(iommu);
 
 out_unlock:
 	spin_unlock_irqrestore(&iommu_table_lock, flags);
 
+	if (new_table) {
+		kmem_cache_free(amd_iommu_irq_cache, new_table->table);
+		kfree(new_table);
+	}
 	return table;
 }