x86/amd-iommu: Cleanup attach/detach_device code

This patch cleans up the attach_device and detach_device
paths and fixes reference counting while at it.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>

Author: Joerg Roedel <joerg.roedel@amd.com>
Date:   2009-11-26 14:49:59 +01:00
Parent: 7c392cbe98
Commit: 7f760ddd70
1 changed file with 58 additions and 44 deletions
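
The reference-counting fix works by moving both counter updates into a single attach/detach helper pair: every successful attach increments the per-domain device count and the per-IOMMU count for that domain, and detach reverses exactly those increments. The stand-alone sketch below models that scheme outside the kernel; the helper and field names (do_attach, do_detach, dev_cnt, dev_iommu[]) mirror the patch, while the struct layouts, the MAX_IOMMUS value, the toy_device struct standing in for the kernel's struct device, and the main() driver are assumptions made only for this illustration.

/* Toy model of the reference counting consolidated by this patch;
 * not kernel code, just an illustration of the counting scheme. */
#include <assert.h>
#include <stdio.h>

#define MAX_IOMMUS 4                       /* assumption for this example */

struct protection_domain {
        int dev_iommu[MAX_IOMMUS];         /* devices in this domain, per IOMMU */
        int dev_cnt;                       /* total devices in this domain */
};

struct toy_device {
        int iommu_index;                   /* IOMMU handling this device */
        struct protection_domain *domain;  /* NULL while detached */
};

/* Mirrors do_attach(): update the data structures, then do the
 * reference counting exactly once per attached device. */
static void do_attach(struct toy_device *dev, struct protection_domain *domain)
{
        dev->domain = domain;
        domain->dev_iommu[dev->iommu_index] += 1;
        domain->dev_cnt += 1;
}

/* Mirrors do_detach(): drop the references taken in do_attach(),
 * then clear the per-device state. */
static void do_detach(struct toy_device *dev)
{
        struct protection_domain *domain = dev->domain;

        domain->dev_iommu[dev->iommu_index] -= 1;
        domain->dev_cnt -= 1;
        dev->domain = NULL;
}

int main(void)
{
        struct protection_domain dom = { { 0 }, 0 };
        struct toy_device dev = { .iommu_index = 1, .domain = NULL };

        do_attach(&dev, &dom);
        assert(dom.dev_cnt == 1 && dom.dev_iommu[1] == 1);

        do_detach(&dev);
        assert(dom.dev_cnt == 0 && dom.dev_iommu[1] == 0);

        printf("counters balance: dev_cnt=%d\n", dom.dev_cnt);
        return 0;
}

Before this patch the increments lived in set_dte_entry() and the decrements in clear_dte_entry(), so list handling and counter updates could diverge between the attach and detach paths; keeping both sides in one symmetric helper pair is what makes the counters balance, as the diff below shows.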

@@ -1329,7 +1329,6 @@ static bool dma_ops_domain(struct protection_domain *domain)
 
 static void set_dte_entry(u16 devid, struct protection_domain *domain)
 {
-        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
         u64 pte_root = virt_to_phys(domain->pt_root);
 
         BUG_ON(amd_iommu_pd_table[devid] != NULL);
@@ -1344,18 +1343,11 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain)
 
         amd_iommu_pd_table[devid] = domain;
-
-        /* Do reference counting */
-        domain->dev_iommu[iommu->index] += 1;
-        domain->dev_cnt += 1;
-
-        /* Flush the changes DTE entry */
-        iommu_queue_inv_dev_entry(iommu, devid);
 }
 
 static void clear_dte_entry(u16 devid)
 {
         struct protection_domain *domain = amd_iommu_pd_table[devid];
-        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
 
         BUG_ON(domain == NULL);
@@ -1368,11 +1360,51 @@ static void clear_dte_entry(u16 devid)
         amd_iommu_dev_table[devid].data[2] = 0;
 
         amd_iommu_apply_erratum_63(devid);
+}
+
+static void do_attach(struct device *dev, struct protection_domain *domain)
+{
+        struct iommu_dev_data *dev_data;
+        struct amd_iommu *iommu;
+        u16 devid;
+
+        devid = get_device_id(dev);
+        iommu = amd_iommu_rlookup_table[devid];
+        dev_data = get_dev_data(dev);
+
+        /* Update data structures */
+        dev_data->domain = domain;
+        list_add(&dev_data->list, &domain->dev_list);
+        set_dte_entry(devid, domain);
+
+        /* Do reference counting */
+        domain->dev_iommu[iommu->index] += 1;
+        domain->dev_cnt += 1;
+
+        /* Flush the DTE entry */
+        iommu_queue_inv_dev_entry(iommu, devid);
+}
+
+static void do_detach(struct device *dev)
+{
+        struct iommu_dev_data *dev_data;
+        struct amd_iommu *iommu;
+        u16 devid;
+
+        devid = get_device_id(dev);
+        iommu = amd_iommu_rlookup_table[devid];
+        dev_data = get_dev_data(dev);
 
         /* decrease reference counters */
-        domain->dev_iommu[iommu->index] -= 1;
-        domain->dev_cnt -= 1;
+        dev_data->domain->dev_iommu[iommu->index] -= 1;
+        dev_data->domain->dev_cnt -= 1;
+
+        /* Update data structures */
+        dev_data->domain = NULL;
+        list_del(&dev_data->list);
+        clear_dte_entry(devid);
 
         /* Flush the DTE entry */
         iommu_queue_inv_dev_entry(iommu, devid);
 }
@@ -1384,12 +1416,10 @@ static int __attach_device(struct device *dev,
                            struct protection_domain *domain)
 {
         struct iommu_dev_data *dev_data, *alias_data;
-        u16 devid, alias;
 
-        devid = get_device_id(dev);
-        alias = amd_iommu_alias_table[devid];
         dev_data = get_dev_data(dev);
         alias_data = get_dev_data(dev_data->alias);
         if (!alias_data)
                 return -EINVAL;
@@ -1406,21 +1436,16 @@ static int __attach_device(struct device *dev,
                 return -EBUSY;
 
         /* Do real assignment */
-        if (alias != devid) {
-                if (alias_data->domain == NULL) {
-                        alias_data->domain = domain;
-                        list_add(&alias_data->list, &domain->dev_list);
-                        set_dte_entry(alias, domain);
-                }
+        if (dev_data->alias != dev) {
+                alias_data = get_dev_data(dev_data->alias);
+                if (alias_data->domain == NULL)
+                        do_attach(dev_data->alias, domain);
 
                 atomic_inc(&alias_data->bind);
         }
 
-        if (dev_data->domain == NULL) {
-                dev_data->domain = domain;
-                list_add(&dev_data->list, &domain->dev_list);
-                set_dte_entry(devid, domain);
-        }
+        if (dev_data->domain == NULL)
+                do_attach(dev, domain);
 
         atomic_inc(&dev_data->bind);
@@ -1459,35 +1484,24 @@ static int attach_device(struct device *dev,
  */
 static void __detach_device(struct device *dev)
 {
-        u16 devid = get_device_id(dev), alias;
-        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
         struct iommu_dev_data *dev_data = get_dev_data(dev);
         struct iommu_dev_data *alias_data;
         unsigned long flags;
 
-        BUG_ON(!iommu);
+        BUG_ON(!dev_data->domain);
 
-        devid = get_device_id(dev);
-        alias = get_device_id(dev_data->alias);
+        spin_lock_irqsave(&dev_data->domain->lock, flags);
 
-        if (devid != alias) {
+        if (dev_data->alias != dev) {
                 alias_data = get_dev_data(dev_data->alias);
-                if (atomic_dec_and_test(&alias_data->bind)) {
-                        spin_lock_irqsave(&alias_data->domain->lock, flags);
-                        clear_dte_entry(alias);
-                        list_del(&alias_data->list);
-                        spin_unlock_irqrestore(&alias_data->domain->lock, flags);
-                        alias_data->domain = NULL;
-                }
+                if (atomic_dec_and_test(&alias_data->bind))
+                        do_detach(dev_data->alias);
         }
 
-        if (atomic_dec_and_test(&dev_data->bind)) {
-                spin_lock_irqsave(&dev_data->domain->lock, flags);
-                clear_dte_entry(devid);
-                list_del(&dev_data->list);
-                spin_unlock_irqrestore(&dev_data->domain->lock, flags);
-                dev_data->domain = NULL;
-        }
+        if (atomic_dec_and_test(&dev_data->bind))
+                do_detach(dev);
+
+        spin_unlock_irqrestore(&dev_data->domain->lock, flags);
 
         /*
          * If we run in passthrough mode the device must be assigned to the