iommu/amd: Use iommu core for passthrough mode

Remove the AMD IOMMU driver implementation for passthrough
mode and rely on the new iommu core features for that.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Author: Joerg Roedel <jroedel@suse.de>
Date:   2015-07-28 16:58:48 +02:00
Commit: 1e6a7b04c0 (parent 55c99a4dc5)

2 changed files with 3 additions and 65 deletions
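
For readers coming to this patch cold, the decision that remains in the driver can be pictured with a small sketch. This is not kernel code, only a minimal userspace model of the check that amd_iommu_add_device() keeps after the change, under the assumption that the iommu core answers iommu_request_dm_for_dev() by attaching the device to a direct-mapped (identity) default domain; the helpers request_direct_mapped_domain() and add_device() below are hypothetical stand-ins for the core and driver paths.

/* Minimal model of the passthrough decision left in the driver (not kernel code). */
#include <stdbool.h>
#include <stdio.h>

struct dev_data {
	bool iommu_v2;              /* device can make use of IOMMUv2 */
};

static bool iommu_pass_through;     /* models iommu=pt on the kernel cmdline */

/* Hypothetical stand-in for iommu_request_dm_for_dev(): the iommu core, not
 * the driver, attaches the device to an identity (direct-mapped) domain. */
static void request_direct_mapped_domain(const char *name)
{
	printf("%s: core-managed identity (direct-mapped) domain\n", name);
}

/* Models the check in amd_iommu_add_device() after this patch. */
static void add_device(const char *name, const struct dev_data *d)
{
	if (iommu_pass_through || d->iommu_v2)
		request_direct_mapped_domain(name);
	else
		printf("%s: core-managed DMA-API domain\n", name);
}

int main(void)
{
	struct dev_data gpu = { .iommu_v2 = true  };
	struct dev_data nic = { .iommu_v2 = false };

	iommu_pass_through = false;
	add_device("gpu", &gpu);    /* IOMMUv2-capable -> identity domain */
	add_device("nic", &nic);    /* ordinary device -> DMA-API domain  */

	iommu_pass_through = true;  /* as if booted with iommu=pt */
	add_device("nic", &nic);    /* now identity mapped as well */

	return 0;
}

With the core providing the identity default domain, the driver-private pt_domain, alloc_passthrough_domain() and amd_iommu_init_passthrough() removed below are no longer needed.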

drivers/iommu/amd_iommu.c

@@ -76,8 +76,6 @@ LIST_HEAD(hpet_map);
  * Domain for untranslated devices - only allocated
  * if iommu=pt passed on kernel cmd line.
  */
-static struct protection_domain *pt_domain;
-
 static const struct iommu_ops amd_iommu_ops;
 
 static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
@@ -96,7 +94,7 @@ struct iommu_dev_data {
 	struct protection_domain *domain; /* Domain the device is bound to */
 	u16 devid;                        /* PCI Device ID */
 	bool iommu_v2;                    /* Device can make use of IOMMUv2 */
-	bool passthrough;                 /* Default for device is pt_domain */
+	bool passthrough;                 /* Device is identity mapped */
 	struct {
 		bool enabled;
 		int qdep;
@@ -116,7 +114,6 @@ struct iommu_cmd {
 struct kmem_cache *amd_iommu_irq_cache;
 
 static void update_domain(struct protection_domain *domain);
-static int alloc_passthrough_domain(void);
 static int protection_domain_init(struct protection_domain *domain);
 
 /****************************************************************************
@@ -2221,15 +2218,6 @@ static void __detach_device(struct iommu_dev_data *dev_data)
 	do_detach(head);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
-
-	/*
-	 * If we run in passthrough mode the device must be assigned to the
-	 * passthrough domain if it is detached from any other domain.
-	 * Make sure we can deassign from the pt_domain itself.
-	 */
-	if (dev_data->passthrough &&
-	    (dev_data->domain == NULL && domain != pt_domain))
-		__attach_device(dev_data, pt_domain);
 }
 
 /*
@@ -2287,7 +2275,7 @@ static int amd_iommu_add_device(struct device *dev)
 
 	BUG_ON(!dev_data);
 
-	if (dev_data->iommu_v2)
+	if (iommu_pass_through || dev_data->iommu_v2)
 		iommu_request_dm_for_dev(dev);
 
 	/* Domains are initialized for this device - have a look what we ended up with */
@@ -2947,21 +2935,6 @@ out_err:
 	return NULL;
 }
 
-static int alloc_passthrough_domain(void)
-{
-	if (pt_domain != NULL)
-		return 0;
-
-	/* allocate passthrough domain */
-	pt_domain = protection_domain_alloc();
-	if (!pt_domain)
-		return -ENOMEM;
-
-	pt_domain->mode = PAGE_MODE_NONE;
-
-	return 0;
-}
-
 static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
 {
 	struct protection_domain *pdomain;
@@ -3222,33 +3195,6 @@ static const struct iommu_ops amd_iommu_ops = {
  *
  *****************************************************************************/
 
-int __init amd_iommu_init_passthrough(void)
-{
-	struct iommu_dev_data *dev_data;
-	struct pci_dev *dev = NULL;
-	int ret;
-
-	ret = alloc_passthrough_domain();
-	if (ret)
-		return ret;
-
-	for_each_pci_dev(dev) {
-		if (!check_device(&dev->dev))
-			continue;
-
-		dev_data = get_dev_data(&dev->dev);
-		dev_data->passthrough = true;
-
-		attach_device(&dev->dev, pt_domain);
-	}
-
-	amd_iommu_stats_init();
-
-	pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
-
-	return 0;
-}
-
 /* IOMMUv2 specific functions */
 int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
 {

drivers/iommu/amd_iommu_init.c

@@ -2026,14 +2026,6 @@ static bool detect_ivrs(void)
 	return true;
 }
 
-static int amd_iommu_init_dma(void)
-{
-	if (iommu_pass_through)
-		return amd_iommu_init_passthrough();
-	else
-		return amd_iommu_init_dma_ops();
-}
-
 /****************************************************************************
  *
  * AMD IOMMU Initialization State Machine
@@ -2073,7 +2065,7 @@ static int __init state_next(void)
 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
 		break;
 	case IOMMU_INTERRUPTS_EN:
-		ret = amd_iommu_init_dma();
+		ret = amd_iommu_init_dma_ops();
 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
 		break;
 	case IOMMU_DMA_OPS: