iommu/tegra: gart: Optimize mapping / unmapping performance
Currently GART writes one page entry at a time and flushes the bus buffer after every write. It is more optimal to aggregate the page entry writes and flush the bus buffer once at the end; this gives a 10-40% map/unmap performance boost (depending on the size of the mapping) in comparison to flushing after each page entry update.

Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
Acked-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 2fc0ac180d
parent 1d7ae53b15
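The mechanism behind the numbers: after this change gart_iommu_map() and gart_iommu_unmap() only write the page entry, and the IOMMU core invokes the new gart_iommu_sync() callback (via the iotlb_sync_map / iotlb_sync hooks introduced by the parent commit) once per mapping operation, so FLUSH_GART_REGS() runs once per iommu_map()/iommu_unmap() call instead of once per page. Below is a minimal, self-contained user-space sketch of that batching pattern; it is not kernel code, and all fake_* names are hypothetical stand-ins for the driver and core hooks.

/*
 * Toy model of the "aggregate writes, flush once" pattern. The hypothetical
 * fake_* helpers stand in for gart_set_pte()/FLUSH_GART_REGS() and for the
 * IOMMU core calling ->map() per page and ->iotlb_sync_map() once.
 */
#include <stdio.h>
#include <stddef.h>

static unsigned long fake_pte[16];	/* stand-in for the GART aperture */
static unsigned int flush_count;	/* counts the expensive bus flushes */

static void fake_set_pte(size_t index, unsigned long pte)
{
	fake_pte[index] = pte;		/* cheap: one page-entry write */
}

static void fake_flush(void)
{
	flush_count++;			/* expensive: flush the bus write buffer */
}

/* per-page map with no flush, like gart_iommu_map() after this patch */
static void fake_map_page(size_t index, unsigned long pfn)
{
	fake_set_pte(index, pfn);
}

/* map a whole range, then sync once, like iommu_map() + ->iotlb_sync_map() */
static void fake_map_range(size_t first, unsigned long pfn, size_t npages)
{
	for (size_t i = 0; i < npages; i++)
		fake_map_page(first + i, pfn + i);

	fake_flush();			/* single flush for the whole mapping */
}

int main(void)
{
	fake_map_range(0, 0x1000, 8);
	printf("flushes for an 8-page mapping: %u\n", flush_count); /* prints 1 */
	return 0;
}

Before the patch the same 8-page mapping would have triggered eight flushes, one per page entry update, which is where the quoted 10-40% win comes from.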
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -290,7 +290,6 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
 		}
 	}
 	gart_set_pte(gart, iova, GART_PTE(pfn));
-	FLUSH_GART_REGS(gart);
 	spin_unlock_irqrestore(&gart->pte_lock, flags);
 	return 0;
 }
@@ -307,7 +306,6 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 
 	spin_lock_irqsave(&gart->pte_lock, flags);
 	gart_set_pte(gart, iova, 0);
-	FLUSH_GART_REGS(gart);
 	spin_unlock_irqrestore(&gart->pte_lock, flags);
 	return bytes;
 }
@@ -373,6 +371,14 @@ static int gart_iommu_of_xlate(struct device *dev,
 	return 0;
 }
 
+static void gart_iommu_sync(struct iommu_domain *domain)
+{
+	struct gart_domain *gart_domain = to_gart_domain(domain);
+	struct gart_device *gart = gart_domain->gart;
+
+	FLUSH_GART_REGS(gart);
+}
+
 static const struct iommu_ops gart_iommu_ops = {
 	.capable = gart_iommu_capable,
 	.domain_alloc = gart_iommu_domain_alloc,
@@ -387,6 +393,8 @@ static const struct iommu_ops gart_iommu_ops = {
 	.iova_to_phys = gart_iommu_iova_to_phys,
 	.pgsize_bitmap = GART_IOMMU_PGSIZES,
 	.of_xlate = gart_iommu_of_xlate,
+	.iotlb_sync_map = gart_iommu_sync,
+	.iotlb_sync = gart_iommu_sync,
 };
 
 static int tegra_gart_suspend(struct device *dev)