drm/i915: Drop dma_buf->k(un)map
No in-tree users left. Aside, I think mock_dmabuf would be a nice addition to drm mock/selftest helpers (we have some already), with an EXPORT_SYMBOL_FOR_TESTS_ONLY. Acked-by: Christian König <christian.koenig@amd.com> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Acked-by: Sumit Semwal <sumit.semwal@linaro.org> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com> Cc: Chris Wilson <chris@chris-wilson.co.uk> Cc: Matthew Auld <matthew.auld@intel.com> Cc: Daniel Vetter <daniel.vetter@ffwll.ch> Cc: Jani Nikula <jani.nikula@intel.com> Cc: Sam Ravnborg <sam@ravnborg.org> Cc: "Christian König" <christian.koenig@amd.com> Link: https://patchwork.freedesktop.org/patch/msgid/20191118103536.17675-7-daniel.vetter@ffwll.ch
This commit is contained in:
parent
3e9e0c5c76
commit
9c8679612e
|
@ -93,40 +93,6 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
|
|||
i915_gem_object_unpin_map(obj);
|
||||
}
|
||||
|
||||
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
|
||||
{
|
||||
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
|
||||
struct page *page;
|
||||
|
||||
if (page_num >= obj->base.size >> PAGE_SHIFT)
|
||||
return NULL;
|
||||
|
||||
if (!i915_gem_object_has_struct_page(obj))
|
||||
return NULL;
|
||||
|
||||
if (i915_gem_object_pin_pages(obj))
|
||||
return NULL;
|
||||
|
||||
/* Synchronisation is left to the caller (via .begin_cpu_access()) */
|
||||
page = i915_gem_object_get_page(obj, page_num);
|
||||
if (IS_ERR(page))
|
||||
goto err_unpin;
|
||||
|
||||
return kmap(page);
|
||||
|
||||
err_unpin:
|
||||
i915_gem_object_unpin_pages(obj);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
 * Tear down a mapping created by i915_gem_dmabuf_kmap(): unmap the page
 * backing @addr and release the page pin taken when mapping it.
 */
static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct page *page = virt_to_page(addr);

	kunmap(page);
	i915_gem_object_unpin_pages(obj);
}
|
||||
|
||||
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
|
||||
{
|
||||
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
|
||||
|
@ -195,8 +161,6 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
|
|||
.map_dma_buf = i915_gem_map_dma_buf,
|
||||
.unmap_dma_buf = i915_gem_unmap_dma_buf,
|
||||
.release = drm_gem_dmabuf_release,
|
||||
.map = i915_gem_dmabuf_kmap,
|
||||
.unmap = i915_gem_dmabuf_kunmap,
|
||||
.mmap = i915_gem_dmabuf_mmap,
|
||||
.vmap = i915_gem_dmabuf_vmap,
|
||||
.vunmap = i915_gem_dmabuf_vunmap,
|
||||
|
|
|
@ -76,20 +76,6 @@ static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
|
|||
vm_unmap_ram(vaddr, mock->npages);
|
||||
}
|
||||
|
||||
static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
|
||||
{
|
||||
struct mock_dmabuf *mock = to_mock(dma_buf);
|
||||
|
||||
return kmap(mock->pages[page_num]);
|
||||
}
|
||||
|
||||
static void mock_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
|
||||
{
|
||||
struct mock_dmabuf *mock = to_mock(dma_buf);
|
||||
|
||||
return kunmap(mock->pages[page_num]);
|
||||
}
|
||||
|
||||
static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
|
||||
{
|
||||
return -ENODEV;
|
||||
|
@ -99,8 +85,6 @@ static const struct dma_buf_ops mock_dmabuf_ops = {
|
|||
.map_dma_buf = mock_map_dma_buf,
|
||||
.unmap_dma_buf = mock_unmap_dma_buf,
|
||||
.release = mock_dmabuf_release,
|
||||
.map = mock_dmabuf_kmap,
|
||||
.unmap = mock_dmabuf_kunmap,
|
||||
.mmap = mock_dmabuf_mmap,
|
||||
.vmap = mock_dmabuf_vmap,
|
||||
.vunmap = mock_dmabuf_vunmap,
|
||||
|
|
Loading…
Reference in New Issue