drm: etnaviv: fix common struct sg_table related issues

Documentation/DMA-API-HOWTO.txt states that dma_map_sg() returns the
number of entries created in the DMA address space. However, the
subsequent calls to dma_sync_sg_for_{device,cpu}() and dma_unmap_sg()
must be made with the original number of entries that was passed to
dma_map_sg().
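
For illustration only, the correct open-coded pattern looks roughly
like this (a sketch; dev, sgl and orig_nents are assumed to be set up
elsewhere):

	/* The returned count may be smaller than orig_nents, e.g. when
	 * an IOMMU coalesces adjacent entries into one mapping. */
	int nents = dma_map_sg(dev, sgl, orig_nents, DMA_BIDIRECTIONAL);
	if (nents == 0)
		return -ENOMEM;

	/* ... device performs DMA here ... */

	/* sync and unmap take the ORIGINAL count, not the mapped one */
	dma_sync_sg_for_cpu(dev, sgl, orig_nents, DMA_BIDIRECTIONAL);
	dma_unmap_sg(dev, sgl, orig_nents, DMA_BIDIRECTIONAL);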

struct sg_table is a common structure used to describe a non-contiguous
memory buffer, widely used in the DRM and graphics subsystems. It
consists of a scatterlist holding the memory pages and DMA addresses
(the sgl entry), plus two entry counts: the number of CPU pages
(orig_nents) and the number of DMA-mapped segments (nents).
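
For reference, the structure is defined in include/linux/scatterlist.h
as:

	struct sg_table {
		struct scatterlist *sgl;	/* the list */
		unsigned int nents;		/* number of mapped entries */
		unsigned int orig_nents;	/* original size of list */
	};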

It turned out to be a common mistake to misuse the nents and orig_nents
entries: drivers would call DMA-mapping functions with the wrong number
of entries, or ignore the number of mapped entries returned by
dma_map_sg().
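
A minimal sketch of the typical bug (hypothetical driver code, not
taken from etnaviv):

	sgt->nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, dir);

	/* BUG: must pass sgt->orig_nents here; sgt->nents may be
	 * smaller, so part of the mapping would never be unmapped.
	 * This happens to work without an IOMMU, where the two counts
	 * are equal, which is why the bug goes unnoticed. */
	dma_unmap_sg(dev, sgt->sgl, sgt->nents, dir);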

To avoid such issues, let's use the common DMA-mapping wrappers that
operate directly on struct sg_table objects, and use scatterlist page
iterators where possible. This almost always hides the references to
the nents and orig_nents entries, making the code robust, easier to
follow and copy/paste safe.
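
A minimal sketch of the wrapper-based pattern (error handling trimmed;
do_something() is a placeholder):

	struct scatterlist *sg;
	int i, ret;

	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		return ret;	/* sgt->nents is set internally on success */

	/* iterate only over the entries actually mapped for DMA */
	for_each_sgtable_dma_sg(sgt, sg, i)
		do_something(sg_dma_address(sg), sg_dma_len(sg));

	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);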

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Lucas Stach <l.stach@pengutronix.de>
commit 182354a526 (parent efcb3730ef), 2020-04-28
2 changed files with 9 additions and 18 deletions

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c

@@ -27,7 +27,7 @@ static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
 	 * because display controller, GPU, etc. are not coherent.
 	 */
 	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
-		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
 }
 
 static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
@@ -51,7 +51,7 @@ static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj
 	 * discard those writes.
 	 */
 	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
-		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
 }
 
 /* called with etnaviv_obj->lock held */
@@ -404,8 +404,7 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
 	}
 
 	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
-		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
-				    etnaviv_obj->sgt->nents,
-				    etnaviv_op_to_dma_dir(op));
+		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
+					 etnaviv_op_to_dma_dir(op));
 		etnaviv_obj->last_cpu_prep_op = op;
 	}
@@ -421,8 +420,7 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
 	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
 		/* fini without a prep is almost certainly a userspace error */
 		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
-		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
-				       etnaviv_obj->sgt->nents,
-				       etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
+		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
+					    etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
 		etnaviv_obj->last_cpu_prep_op = 0;
 	}

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c

@@ -73,13 +73,13 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
 			     struct sg_table *sgt, unsigned len, int prot)
 {
 	struct scatterlist *sg;
 	unsigned int da = iova;
-	unsigned int i, j;
+	unsigned int i;
 	int ret;
 
 	if (!context || !sgt)
 		return -EINVAL;
 
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+	for_each_sgtable_dma_sg(sgt, sg, i) {
 		u32 pa = sg_dma_address(sg) - sg->offset;
 		size_t bytes = sg_dma_len(sg) + sg->offset;
@@ -95,14 +95,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
 	return 0;
 
 fail:
-	da = iova;
-
-	for_each_sg(sgt->sgl, sg, i, j) {
-		size_t bytes = sg_dma_len(sg) + sg->offset;
-
-		etnaviv_context_unmap(context, da, bytes);
-		da += bytes;
-	}
+	etnaviv_context_unmap(context, iova, da - iova);
 	return ret;
 }
@@ -113,7 +106,7 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
 	unsigned int da = iova;
 	int i;
 
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+	for_each_sgtable_dma_sg(sgt, sg, i) {
 		size_t bytes = sg_dma_len(sg) + sg->offset;
 
 		etnaviv_context_unmap(context, da, bytes);