iommu/amd: Avoid locking get_irq_table() from atomic context
get_irq_table() previously acquired amd_iommu_devtable_lock which is not a raw lock, and thus cannot be acquired from atomic context on PREEMPT_RT. Many calls to modify_irte*() come from atomic context due to the IRQ desc->lock, as does amd_iommu_update_ga() due to the preemption disabling in vcpu_load/put().

The only difference between calling get_irq_table() and reading from irq_lookup_table[] directly, other than the lock acquisition and amd_iommu_rlookup_table[] check, is if the table entry is unpopulated, which should never happen when looking up a devid that came from an irq_2_irte struct, as get_irq_table() would have already been called on that devid during irq_remapping_alloc().

The lock acquisition is not needed in these cases because entries in irq_lookup_table[] never change once non-NULL -- nor would the amd_iommu_devtable_lock usage in get_irq_table() provide meaningful protection if they did, since it's released before using the looked up table in the get_irq_table() caller.

Rename the old get_irq_table() to alloc_irq_table(), and create a new lockless get_irq_table() to be used in non-allocating contexts that WARNs if it doesn't find what it's looking for.

Signed-off-by: Scott Wood <swood@redhat.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 01ee04bade
commit df42a04b15
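For context, the reasoning above amounts to a write-once, read-lockless pattern: a slot in irq_lookup_table[] is populated at most once on the allocation path (which may block and take locks), and once it is non-NULL it never changes, so lookup paths that run in atomic context can read it without locking. The user-space sketch below illustrates that pattern only; it is not kernel code, and every name in it (alloc_table_sketch, get_table_sketch, lookup_table_sketch) is illustrative rather than taken from the driver.

/*
 * Sketch of the write-once / lockless-read pattern described above.
 * The allocation path may sleep and serializes with a mutex; the lookup
 * path takes no lock because a slot never changes once non-NULL, and in
 * the kernel analogy the slot is guaranteed to have been populated
 * (during irq_remapping_alloc()) before any lookup of that devid occurs.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_DEVID 16

struct remap_table_sketch {
	int entries[32];
};

static struct remap_table_sketch *lookup_table_sketch[MAX_DEVID];
static pthread_mutex_t table_lock_sketch = PTHREAD_MUTEX_INITIALIZER;

/* Allocation path: may block, takes the lock, populates the slot once. */
static struct remap_table_sketch *alloc_table_sketch(unsigned int devid)
{
	struct remap_table_sketch *table;

	pthread_mutex_lock(&table_lock_sketch);
	table = lookup_table_sketch[devid];
	if (!table) {
		table = calloc(1, sizeof(*table));
		lookup_table_sketch[devid] = table;
	}
	pthread_mutex_unlock(&table_lock_sketch);

	return table;
}

/* Lookup path: lockless; in the kernel analogy this is safe from atomic context. */
static struct remap_table_sketch *get_table_sketch(unsigned int devid)
{
	struct remap_table_sketch *table = lookup_table_sketch[devid];

	if (!table)
		fprintf(stderr, "no table for devid %u\n", devid);

	return table;
}

int main(void)
{
	/* "irq_remapping_alloc()" time: populate the slot once. */
	alloc_table_sketch(3);

	/* Later, e.g. "modify_irte()" time: lockless lookup. */
	printf("lookup %s\n", get_table_sketch(3) ? "succeeded" : "failed");

	return 0;
}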
@@ -3595,7 +3595,22 @@ static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
 	amd_iommu_dev_table[devid].data[2] = dte;
 }
 
-static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
+static struct irq_remap_table *get_irq_table(u16 devid)
+{
+	struct irq_remap_table *table;
+
+	if (WARN_ONCE(!amd_iommu_rlookup_table[devid],
+		      "%s: no iommu for devid %x\n", __func__, devid))
+		return NULL;
+
+	table = irq_lookup_table[devid];
+	if (WARN_ONCE(!table, "%s: no table for devid %x\n", __func__, devid))
+		return NULL;
+
+	return table;
+}
+
+static struct irq_remap_table *alloc_irq_table(u16 devid, bool ioapic)
 {
 	struct irq_remap_table *table = NULL;
 	struct amd_iommu *iommu;
@@ -3682,7 +3697,7 @@ static int alloc_irq_index(u16 devid, int count, bool align)
 	if (!iommu)
 		return -ENODEV;
 
-	table = get_irq_table(devid, false);
+	table = alloc_irq_table(devid, false);
 	if (!table)
 		return -ENODEV;
 
@@ -3733,7 +3748,7 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
 	if (iommu == NULL)
 		return -EINVAL;
 
-	table = get_irq_table(devid, false);
+	table = get_irq_table(devid);
 	if (!table)
 		return -ENOMEM;
 
@@ -3766,7 +3781,7 @@ static int modify_irte(u16 devid, int index, union irte *irte)
 	if (iommu == NULL)
 		return -EINVAL;
 
-	table = get_irq_table(devid, false);
+	table = get_irq_table(devid);
 	if (!table)
 		return -ENOMEM;
 
@@ -3790,7 +3805,7 @@ static void free_irte(u16 devid, int index)
 	if (iommu == NULL)
 		return;
 
-	table = get_irq_table(devid, false);
+	table = get_irq_table(devid);
 	if (!table)
 		return;
 
@@ -4108,7 +4123,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
 		return ret;
 
 	if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
-		if (get_irq_table(devid, true))
+		if (alloc_irq_table(devid, true))
 			index = info->ioapic_pin;
 		else
 			ret = -ENOMEM;
@@ -4391,7 +4406,7 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
 	if (!iommu)
 		return -ENODEV;
 
-	irt = get_irq_table(devid, false);
+	irt = get_irq_table(devid);
 	if (!irt)
 		return -ENODEV;
 