iommu/dma: Use for_each_sg in iommu_dma_alloc
arch_dma_prep_coherent can handle physically contiguous ranges larger than PAGE_SIZE just fine, which means we don't need a page-based iterator.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 23f88e0a7e
parent af751d4308
@@ -606,15 +606,11 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
 		goto out_free_iova;
 
 	if (!(prot & IOMMU_CACHE)) {
-		struct sg_mapping_iter miter;
-		/*
-		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
-		 * sufficient here, so skip it by using the "wrong" direction.
-		 */
-		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
-		while (sg_miter_next(&miter))
-			arch_dma_prep_coherent(miter.page, PAGE_SIZE);
-		sg_miter_stop(&miter);
+		struct scatterlist *sg;
+		int i;
+
+		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
+			arch_dma_prep_coherent(sg_page(sg), sg->length);
 	}
 
 	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
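For reference, a minimal standalone sketch of the pattern the new hunk adopts; the helper name example_prep_coherent_sg and the header choice are illustrative assumptions, not part of the commit. Each scatterlist entry describes one physically contiguous range, so arch_dma_prep_coherent() can be called once per segment with the full segment length instead of once per PAGE_SIZE chunk.

#include <linux/scatterlist.h>
#include <linux/dma-noncoherent.h>	/* arch_dma_prep_coherent() declaration at the time of this series */

/*
 * Hypothetical helper illustrating the for_each_sg pattern: every entry in
 * the table is physically contiguous, so the whole segment length is passed
 * to arch_dma_prep_coherent() rather than flushing one page at a time.
 */
static void example_prep_coherent_sg(struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
		arch_dma_prep_coherent(sg_page(sg), sg->length);
}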