iommu/omap: Migrate to the generic fault report mechanism

Start using the generic fault report mechanism, as provided by
the IOMMU core, and remove its now-redundant omap_iommu_set_isr API.

Currently we're only interested in letting the upper layers know about the
fault, so that if the faulting device is a remote processor, they can
restart it.

Dynamic PTE/TLB loading is not supported.

Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
This commit is contained in:
Ohad Ben-Cohen 2011-09-13 15:26:29 -04:00 committed by Joerg Roedel
parent 4f3f8d9db3
commit e7f10f02ef
2 changed files with 4 additions and 30 deletions

View File

@ -32,6 +32,7 @@ struct omap_iommu {
void __iomem *regbase; void __iomem *regbase;
struct device *dev; struct device *dev;
void *isr_priv; void *isr_priv;
struct iommu_domain *domain;
unsigned int refcount; unsigned int refcount;
spinlock_t iommu_lock; /* global for this whole object */ spinlock_t iommu_lock; /* global for this whole object */
@ -48,8 +49,6 @@ struct omap_iommu {
struct list_head mmap; struct list_head mmap;
struct mutex mmap_lock; /* protect mmap */ struct mutex mmap_lock; /* protect mmap */
int (*isr)(struct omap_iommu *obj, u32 da, u32 iommu_errs, void *priv);
void *ctx; /* iommu context: registres saved area */ void *ctx; /* iommu context: registres saved area */
u32 da_start; u32 da_start;
u32 da_end; u32 da_end;

View File

@ -775,6 +775,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
u32 da, errs; u32 da, errs;
u32 *iopgd, *iopte; u32 *iopgd, *iopte;
struct omap_iommu *obj = data; struct omap_iommu *obj = data;
struct iommu_domain *domain = obj->domain;
if (!obj->refcount) if (!obj->refcount)
return IRQ_NONE; return IRQ_NONE;
@ -786,7 +787,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
return IRQ_HANDLED; return IRQ_HANDLED;
/* Fault callback or TLB/PTE Dynamic loading */ /* Fault callback or TLB/PTE Dynamic loading */
if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv)) if (!report_iommu_fault(domain, obj->dev, da, 0))
return IRQ_HANDLED; return IRQ_HANDLED;
iommu_disable(obj); iommu_disable(obj);
@ -904,33 +905,6 @@ static void omap_iommu_detach(struct omap_iommu *obj)
dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
} }
int omap_iommu_set_isr(const char *name,
int (*isr)(struct omap_iommu *obj, u32 da, u32 iommu_errs,
void *priv),
void *isr_priv)
{
struct device *dev;
struct omap_iommu *obj;
dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
device_match_by_alias);
if (!dev)
return -ENODEV;
obj = to_iommu(dev);
spin_lock(&obj->iommu_lock);
if (obj->refcount != 0) {
spin_unlock(&obj->iommu_lock);
return -EBUSY;
}
obj->isr = isr;
obj->isr_priv = isr_priv;
spin_unlock(&obj->iommu_lock);
return 0;
}
EXPORT_SYMBOL_GPL(omap_iommu_set_isr);
/* /*
* OMAP Device MMU(IOMMU) detection * OMAP Device MMU(IOMMU) detection
*/ */
@ -1115,6 +1089,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
} }
omap_domain->iommu_dev = oiommu; omap_domain->iommu_dev = oiommu;
oiommu->domain = domain;
out: out:
spin_unlock(&omap_domain->lock); spin_unlock(&omap_domain->lock);