drm: omapdrm: Map pages for DMA in DMA_TO_DEVICE direction

The display engine only reads from memory; there's no need to use
bidirectional DMA mappings. Use DMA_TO_DEVICE instead.

Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>

commit 97817fd465
parent 930dc19c0b
Author:    Laurent Pinchart, 2017-04-21 00:33:58 +03:00
Committer: Tomi Valkeinen

2 changed files with 7 additions and 8 deletions
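
For readers unfamiliar with the streaming DMA API, the sketch below illustrates the pattern this patch narrows: a page the device will only read from can be mapped with DMA_TO_DEVICE, and the unmap must pass the same direction as the map. This is a minimal, hypothetical example rather than code from the driver; the helper names and the bare struct device argument are assumptions.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Map one page for device-read-only DMA (e.g. scanout). Hypothetical helper. */
static int example_map_scanout_page(struct device *dev, struct page *page,
                                    dma_addr_t *dma_addr)
{
    /* The device only reads the buffer, so DMA_TO_DEVICE is sufficient. */
    *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, *dma_addr))
        return -ENOMEM;

    return 0;
}

/* Tear down the mapping; the direction must match the one used at map time. */
static void example_unmap_scanout_page(struct device *dev, dma_addr_t dma_addr)
{
    dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_TO_DEVICE);
}

On non-coherent platforms such as OMAP the direction typically determines which cache maintenance the DMA API performs, so the narrower DMA_TO_DEVICE direction also avoids needless cache invalidation when the pages are unmapped.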

--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -254,7 +254,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
         for (i = 0; i < npages; i++) {
             addrs[i] = dma_map_page(dev->dev, pages[i],
-                    0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+                    0, PAGE_SIZE, DMA_TO_DEVICE);
             if (dma_mapping_error(dev->dev, addrs[i])) {
                 dev_warn(dev->dev,
@@ -262,7 +262,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
                 for (i = i - 1; i >= 0; --i) {
                     dma_unmap_page(dev->dev, addrs[i],
-                            PAGE_SIZE, DMA_BIDIRECTIONAL);
+                            PAGE_SIZE, DMA_TO_DEVICE);
                 }
                 ret = -ENOMEM;
@@ -322,7 +322,7 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
         for (i = 0; i < npages; i++) {
             if (omap_obj->dma_addrs[i])
                 dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
-                        PAGE_SIZE, DMA_BIDIRECTIONAL);
+                        PAGE_SIZE, DMA_TO_DEVICE);
         }
         kfree(omap_obj->dma_addrs);
@@ -744,7 +744,7 @@ void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
     if (omap_obj->dma_addrs[pgoff]) {
         dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
-                PAGE_SIZE, DMA_BIDIRECTIONAL);
+                PAGE_SIZE, DMA_TO_DEVICE);
         omap_obj->dma_addrs[pgoff] = 0;
     }
 }
@@ -767,8 +767,7 @@ void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
             dma_addr_t addr;
 
             addr = dma_map_page(dev->dev, pages[i], 0,
-                    PAGE_SIZE, DMA_BIDIRECTIONAL);
-
+                    PAGE_SIZE, dir);
             if (dma_mapping_error(dev->dev, addr)) {
                 dev_warn(dev->dev, "%s: failed to map page\n",
                      __func__);
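
Note that in the omap_gem_dma_sync_buffer() hunk above, the hard-coded DMA_BIDIRECTIONAL is replaced with the function's dir argument rather than with DMA_TO_DEVICE, so the caller picks the direction it actually needs. A hypothetical helper following the same pattern (the name, signature and error handling here are assumptions, not driver code) might look like this:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Map any still-unmapped pages using the direction the caller requests. */
static int example_sync_pages_for_device(struct device *dev,
                                         struct page **pages,
                                         dma_addr_t *dma_addrs,
                                         unsigned int npages,
                                         enum dma_data_direction dir)
{
    unsigned int i;

    for (i = 0; i < npages; i++) {
        if (dma_addrs[i])
            continue; /* already mapped */

        dma_addrs[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE, dir);
        if (dma_mapping_error(dev, dma_addrs[i])) {
            dma_addrs[i] = 0;
            return -ENOMEM;
        }
    }

    return 0;
}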

--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -210,7 +210,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
     get_dma_buf(dma_buf);
-    sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+    sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
     if (IS_ERR(sgt)) {
         ret = PTR_ERR(sgt);
         goto fail_detach;
@@ -227,7 +227,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
     return obj;
 fail_unmap:
-    dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+    dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
 fail_detach:
     dma_buf_detach(dma_buf, attach);
     dma_buf_put(dma_buf);
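
The second file applies the same reasoning to the PRIME import path: an imported dma-buf is only scanned out by the display engine, so the attachment can be mapped DMA_TO_DEVICE as well, and the unmap on the error and release paths must use the same direction. Below is a minimal, hypothetical sketch of that pattern; the function name and the simplified error handling are assumptions, not the driver's code.

#include <linux/dma-buf.h>
#include <linux/err.h>

/* Attach to a dma-buf and map it for a read-only (scanout) consumer. */
static struct sg_table *example_import_for_scanout(struct device *dev,
                                                   struct dma_buf *buf,
                                                   struct dma_buf_attachment **attachp)
{
    struct dma_buf_attachment *attach;
    struct sg_table *sgt;

    attach = dma_buf_attach(buf, dev);
    if (IS_ERR(attach))
        return ERR_CAST(attach);

    /* The display engine only reads, so DMA_TO_DEVICE is enough. */
    sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
    if (IS_ERR(sgt)) {
        dma_buf_detach(buf, attach);
        return sgt;
    }

    *attachp = attach;
    return sgt;
}

When the object is torn down, dma_buf_unmap_attachment() has to be called with the same DMA_TO_DEVICE direction, which is why the map and unmap calls change together in the hunks above.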