powerpc/dma: untangle vio_dma_mapping_ops from dma_iommu_ops
vio_dma_mapping_ops currently does a lot of indirect calls through dma_iommu_ops, which not only make the code harder to follow but are also expensive in the post-Spectre world. Unwind the indirect calls by calling the ppc_iommu_* or iommu_* APIs directly where applicable, or just use the dma_iommu_* methods directly where we can.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit a20f507f57
parent fbce251baa
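For context, the cost being avoided is the indirect branch taken on every call through a function-pointer table, which Spectre mitigations (retpolines) make noticeably slower. Below is a minimal userspace sketch of the before/after shape of the change; the struct and function names are hypothetical stand-ins, not the kernel APIs:

```c
#include <stdio.h>

struct dma_ops {                        /* stand-in for dma_iommu_ops */
        int (*map)(int page);
};

static int iommu_map(int page)          /* stand-in for iommu_map_page() */
{
        return page + 1;
}

static const struct dma_ops ops = { .map = iommu_map };

int main(void)
{
        /* Before: an indirect call through the ops table; with Spectre
         * mitigations enabled this compiles to a retpoline thunk. */
        int a = ops.map(42);

        /* After: a direct call the compiler can inline or the CPU can
         * predict cheaply - what this patch does for the vio_* wrappers. */
        int b = iommu_map(42);

        printf("%d %d\n", a, b);
        return 0;
}
```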
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -237,6 +237,7 @@ static inline void iommu_del_device(struct device *dev)
 }
 #endif /* !CONFIG_IOMMU_API */
 
+u64 dma_iommu_get_required_mask(struct device *dev);
 #else
 
 static inline void *get_iommu_table_base(struct device *dev)
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -92,7 +92,7 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
 
-static u64 dma_iommu_get_required_mask(struct device *dev)
+u64 dma_iommu_get_required_mask(struct device *dev)
 {
 	struct iommu_table *tbl = get_iommu_table_base(dev);
 	u64 mask;
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -492,7 +492,9 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
 		return NULL;
 	}
 
-	ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs);
+	ret = iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
+				   dma_handle, dev->coherent_dma_mask, flag,
+				   dev_to_node(dev));
 	if (unlikely(ret == NULL)) {
 		vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
 		atomic_inc(&viodev->cmo.allocs_failed);
@@ -507,8 +509,7 @@ static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
 
-	dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs);
-
+	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
 	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
 }
 
@@ -518,22 +519,22 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
 					 unsigned long attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
-	struct iommu_table *tbl;
+	struct iommu_table *tbl = get_iommu_table_base(dev);
 	dma_addr_t ret = DMA_MAPPING_ERROR;
 
-	tbl = get_iommu_table_base(dev);
-	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
-		atomic_inc(&viodev->cmo.allocs_failed);
-		return ret;
-	}
-
-	ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
-	if (unlikely(dma_mapping_error(dev, ret))) {
-		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
-		atomic_inc(&viodev->cmo.allocs_failed);
-	}
-
+	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
+		goto out_fail;
+	ret = iommu_map_page(dev, tbl, page, offset, size, device_to_mask(dev),
+			direction, attrs);
+	if (unlikely(ret == DMA_MAPPING_ERROR))
+		goto out_deallocate;
 	return ret;
+
+out_deallocate:
+	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
+out_fail:
+	atomic_inc(&viodev->cmo.allocs_failed);
+	return DMA_MAPPING_ERROR;
 }
 
 static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
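Note that the rewritten map_page also replaces nested error blocks with the kernel's usual goto-based unwind style: each failure jumps to a label that undoes exactly the state acquired so far, so the cleanup code exists once. A standalone sketch of that pattern, with made-up resources standing in for the CMO entitlement and the IOMMU mapping:

```c
#include <stdlib.h>

/* Hypothetical stand-ins for vio_cmo_alloc()/vio_cmo_dealloc() and
 * iommu_map_page(); only the control flow mirrors the patch. */
static int reserve_budget(size_t n)   { return n > 4096 ? -1 : 0; }
static void release_budget(size_t n)  { (void)n; }
static void *map_buffer(size_t n)     { return malloc(n); }

void *do_map(size_t size)
{
        void *ret;

        if (reserve_budget(size))
                goto out_fail;          /* nothing acquired yet */
        ret = map_buffer(size);
        if (!ret)
                goto out_deallocate;    /* undo the reservation only */
        return ret;

out_deallocate:
        release_budget(size);
out_fail:
        /* record the failure, as atomic_inc(&viodev->cmo.allocs_failed)
         * does in the real code */
        return NULL;
}
```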
@@ -542,11 +543,9 @@ static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
 				     unsigned long attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
-	struct iommu_table *tbl;
+	struct iommu_table *tbl = get_iommu_table_base(dev);
 
-	tbl = get_iommu_table_base(dev);
-	dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
-
+	iommu_unmap_page(tbl, dma_handle, size, direction, attrs);
 	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
 }
 
@@ -555,34 +554,32 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 				unsigned long attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
-	struct iommu_table *tbl;
+	struct iommu_table *tbl = get_iommu_table_base(dev);
 	struct scatterlist *sgl;
 	int ret, count;
 	size_t alloc_size = 0;
 
-	tbl = get_iommu_table_base(dev);
 	for_each_sg(sglist, sgl, nelems, count)
 		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
 
-	if (vio_cmo_alloc(viodev, alloc_size)) {
-		atomic_inc(&viodev->cmo.allocs_failed);
-		return 0;
-	}
-
-	ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);
-
-	if (unlikely(!ret)) {
-		vio_cmo_dealloc(viodev, alloc_size);
-		atomic_inc(&viodev->cmo.allocs_failed);
-		return ret;
-	}
-
+	if (vio_cmo_alloc(viodev, alloc_size))
+		goto out_fail;
+	ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, device_to_mask(dev),
+			direction, attrs);
+	if (unlikely(!ret))
+		goto out_deallocate;
 	for_each_sg(sglist, sgl, ret, count)
 		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
 	if (alloc_size)
 		vio_cmo_dealloc(viodev, alloc_size);
 
 	return ret;
+
+out_deallocate:
+	vio_cmo_dealloc(viodev, alloc_size);
+out_fail:
+	atomic_inc(&viodev->cmo.allocs_failed);
+	return 0;
 }
 
 static void vio_dma_iommu_unmap_sg(struct device *dev,
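In both scatter-gather paths the CMO entitlement is charged per IOMMU page: each segment length is rounded up to IOMMU_PAGE_SIZE(tbl) before vio_cmo_alloc(), and after the mapping succeeds the over-charge is refunded based on the dma_length the IOMMU actually used. A plain C sketch of that accounting, assuming a fixed 4 KiB page and a made-up segment list:

```c
#include <stddef.h>
#include <stdio.h>

#define IOMMU_PAGE 4096u        /* stand-in for IOMMU_PAGE_SIZE(tbl) */

static size_t roundup_page(size_t len)
{
        return (len + IOMMU_PAGE - 1) / IOMMU_PAGE * IOMMU_PAGE;
}

int main(void)
{
        size_t lengths[] = { 100, 5000, 4096 };  /* sgl->length per segment */
        size_t alloc_size = 0;

        /* Charge the worst case up front, as the map_sg path does. */
        for (size_t i = 0; i < 3; i++)
                alloc_size += roundup_page(lengths[i]);

        /* After mapping, subtract what was actually used (sgl->dma_length);
         * pretend the IOMMU coalesced everything into two pages. */
        size_t used = 2 * IOMMU_PAGE;
        if (alloc_size > used)
                printf("refund %zu bytes of entitlement\n", alloc_size - used);
        return 0;
}
```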
@@ -591,30 +588,18 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
 			unsigned long attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
-	struct iommu_table *tbl;
+	struct iommu_table *tbl = get_iommu_table_base(dev);
 	struct scatterlist *sgl;
 	size_t alloc_size = 0;
 	int count;
 
-	tbl = get_iommu_table_base(dev);
 	for_each_sg(sglist, sgl, nelems, count)
 		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
 
-	dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
-
+	ppc_iommu_unmap_sg(tbl, sglist, nelems, direction, attrs);
 	vio_cmo_dealloc(viodev, alloc_size);
 }
 
-static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
-{
-	return dma_iommu_ops.dma_supported(dev, mask);
-}
-
-static u64 vio_dma_get_required_mask(struct device *dev)
-{
-	return dma_iommu_ops.get_required_mask(dev);
-}
-
 static const struct dma_map_ops vio_dma_mapping_ops = {
 	.alloc             = vio_dma_iommu_alloc_coherent,
 	.free              = vio_dma_iommu_free_coherent,
@@ -623,8 +608,8 @@ static const struct dma_map_ops vio_dma_mapping_ops = {
 	.unmap_sg          = vio_dma_iommu_unmap_sg,
 	.map_page          = vio_dma_iommu_map_page,
 	.unmap_page        = vio_dma_iommu_unmap_page,
-	.dma_supported     = vio_dma_iommu_dma_supported,
-	.get_required_mask = vio_dma_get_required_mask,
+	.dma_supported     = dma_iommu_dma_supported,
+	.get_required_mask = dma_iommu_get_required_mask,
 };
 
 /**