AMD IOMMU: move TLB flushing to the map/unmap helper functions

This patch moves the invocation of the TLB flushing functions into the
map/unmap helpers, because it is code common to all of the dma_ops
mapping and unmapping paths.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Joerg Roedel, 2008-09-04 15:49:46 +02:00, committed by Ingo Molnar
parent dbcc112e3b
commit 270cab2426
1 changed file with 5 additions and 14 deletions
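
The change follows a simple hoisting pattern: the IO/TLB flush that each
dma_ops entry point used to issue after calling __map_single() or
__unmap_single() now lives inside those helpers, so two flush invocations
in the helpers replace six duplicated ones at the call sites. A minimal,
self-contained sketch of the pattern (the struct and stub functions below
are hypothetical stand-ins for illustration, not the driver's real types):

	/*
	 * Standalone sketch, compilable with gcc: once the helper issues
	 * the flush itself, every caller inherits it and the per-caller
	 * copies can be deleted.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct iommu {
		bool has_npcache;	/* device caches non-present entries */
		int  domain_id;
	};

	/* Stub standing in for the real IO/TLB invalidation command. */
	static void iommu_flush_pages(struct iommu *iommu, int dom_id,
				      unsigned long address, size_t size)
	{
		printf("flush: dom %d addr %#lx size %zu\n",
		       dom_id, address, size);
	}

	/*
	 * After the patch: the flush lives in the low-level helper,
	 * guarded by the npcache check, so the map_single()-style entry
	 * points no longer need their own copies.
	 */
	static unsigned long __map_single(struct iommu *iommu,
					  unsigned long paddr, size_t size)
	{
		unsigned long address = paddr;	/* stand-in for allocation */

		if (iommu->has_npcache)
			iommu_flush_pages(iommu, iommu->domain_id,
					  address, size);

		return address;
	}

	/* A dma_ops-style entry point: no flush of its own any more. */
	static unsigned long map_single(struct iommu *iommu,
					unsigned long paddr, size_t size)
	{
		return __map_single(iommu, paddr, size);
	}

	int main(void)
	{
		struct iommu iommu = { .has_npcache = true, .domain_id = 1 };

		map_single(&iommu, 0x100000, 4096);
		return 0;
	}

In the driver itself the helper-level check is additionally marked
unlikely(), as the diff below shows.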


@@ -795,6 +795,9 @@ static dma_addr_t __map_single(struct device *dev,
 	}
 	address += offset;
 
+	if (unlikely(iommu_has_npcache(iommu)))
+		iommu_flush_pages(iommu, dma_dom->domain.id, address, size);
+
 out:
 	return address;
 }
@@ -825,6 +828,8 @@ static void __unmap_single(struct amd_iommu *iommu,
 	}
 
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
+
+	iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
 }
 
 /*
@@ -853,9 +858,6 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
 	if (addr == bad_dma_address)
 		goto out;
 
-	if (iommu_has_npcache(iommu))
-		iommu_flush_pages(iommu, domain->id, addr, size);
-
 	if (iommu->need_sync)
 		iommu_completion_wait(iommu);
 
@@ -885,8 +887,6 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, dir);
 
-	iommu_flush_pages(iommu, domain->id, dma_addr, size);
-
 	if (iommu->need_sync)
 		iommu_completion_wait(iommu);
 
@@ -948,9 +948,6 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 			mapped_elems++;
 		} else
 			goto unmap;
-		if (iommu_has_npcache(iommu))
-			iommu_flush_pages(iommu, domain->id, s->dma_address,
-					  s->dma_length);
 	}
 
 	if (iommu->need_sync)
@@ -996,8 +993,6 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 	for_each_sg(sglist, s, nelems, i) {
 		__unmap_single(iommu, domain->priv, s->dma_address,
 			       s->dma_length, dir);
-		iommu_flush_pages(iommu, domain->id, s->dma_address,
-				  s->dma_length);
 		s->dma_address = s->dma_length = 0;
 	}
 
@@ -1048,9 +1043,6 @@ static void *alloc_coherent(struct device *dev, size_t size,
 		goto out;
 	}
 
-	if (iommu_has_npcache(iommu))
-		iommu_flush_pages(iommu, domain->id, *dma_addr, size);
-
 	if (iommu->need_sync)
 		iommu_completion_wait(iommu);
 
@@ -1082,7 +1074,6 @@ static void free_coherent(struct device *dev, size_t size,
 	spin_lock_irqsave(&domain->lock, flags);
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
-	iommu_flush_pages(iommu, domain->id, dma_addr, size);
 
 	if (iommu->need_sync)
 		iommu_completion_wait(iommu);