dma-buf: Rename dma-ops to prevent conflict with kunmap_atomic macro

Seeing the kunmap_atomic dma_buf_ops share the same name with a macro
in highmem.h, the former can be aliased if any dma-buf user includes
that header.

I'm personally trying to include highmem.h inside scatterlist.h and this
breaks the dma-buf code proper.

Christoph Hellwig suggested [1] renaming it and pushing this patch ASAP.

To maintain consistency I've renamed all four of kmap* and kunmap* to be
map* and unmap*. (Even though only kunmap_atomic presently conflicts.)

[1] https://www.spinics.net/lists/target-devel/msg15070.html

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
Link: http://patchwork.freedesktop.org/patch/msgid/1492630570-879-1-git-send-email-logang@deltatee.com
This commit is contained in:
Logan Gunthorpe 2017-04-19 13:36:10 -06:00 committed by Sumit Semwal
parent 418d59ef22
commit f9b67f0014
14 changed files with 61 additions and 61 deletions

View File

@@ -405,8 +405,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 		  || !exp_info->ops->map_dma_buf
 		  || !exp_info->ops->unmap_dma_buf
 		  || !exp_info->ops->release
-		  || !exp_info->ops->kmap_atomic
-		  || !exp_info->ops->kmap
+		  || !exp_info->ops->map_atomic
+		  || !exp_info->ops->map
 		  || !exp_info->ops->mmap)) {
 		return ERR_PTR(-EINVAL);
 	}
@@ -872,7 +872,7 @@ void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
 {
 	WARN_ON(!dmabuf);
 
-	return dmabuf->ops->kmap_atomic(dmabuf, page_num);
+	return dmabuf->ops->map_atomic(dmabuf, page_num);
 }
 EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
@@ -889,8 +889,8 @@ void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
 {
 	WARN_ON(!dmabuf);
 
-	if (dmabuf->ops->kunmap_atomic)
-		dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
+	if (dmabuf->ops->unmap_atomic)
+		dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr);
 }
 EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
@@ -907,7 +907,7 @@ void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
 {
 	WARN_ON(!dmabuf);
 
-	return dmabuf->ops->kmap(dmabuf, page_num);
+	return dmabuf->ops->map(dmabuf, page_num);
 }
 EXPORT_SYMBOL_GPL(dma_buf_kmap);
@@ -924,8 +924,8 @@ void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
 {
 	WARN_ON(!dmabuf);
 
-	if (dmabuf->ops->kunmap)
-		dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
+	if (dmabuf->ops->unmap)
+		dmabuf->ops->unmap(dmabuf, page_num, vaddr);
 }
 EXPORT_SYMBOL_GPL(dma_buf_kunmap);

View File

@@ -529,10 +529,10 @@ static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
 	.map_dma_buf = armada_gem_prime_map_dma_buf,
 	.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
 	.release = drm_gem_dmabuf_release,
-	.kmap_atomic = armada_gem_dmabuf_no_kmap,
-	.kunmap_atomic = armada_gem_dmabuf_no_kunmap,
-	.kmap = armada_gem_dmabuf_no_kmap,
-	.kunmap = armada_gem_dmabuf_no_kunmap,
+	.map_atomic = armada_gem_dmabuf_no_kmap,
+	.unmap_atomic = armada_gem_dmabuf_no_kunmap,
+	.map = armada_gem_dmabuf_no_kmap,
+	.unmap = armada_gem_dmabuf_no_kunmap,
 	.mmap = armada_gem_dmabuf_mmap,
 };

View File

@@ -403,10 +403,10 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
 	.map_dma_buf = drm_gem_map_dma_buf,
 	.unmap_dma_buf = drm_gem_unmap_dma_buf,
 	.release = drm_gem_dmabuf_release,
-	.kmap = drm_gem_dmabuf_kmap,
-	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
-	.kunmap = drm_gem_dmabuf_kunmap,
-	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+	.map = drm_gem_dmabuf_kmap,
+	.map_atomic = drm_gem_dmabuf_kmap_atomic,
+	.unmap = drm_gem_dmabuf_kunmap,
+	.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
 	.mmap = drm_gem_dmabuf_mmap,
 	.vmap = drm_gem_dmabuf_vmap,
 	.vunmap = drm_gem_dmabuf_vunmap,

View File

@@ -200,10 +200,10 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
 	.map_dma_buf = i915_gem_map_dma_buf,
 	.unmap_dma_buf = i915_gem_unmap_dma_buf,
 	.release = drm_gem_dmabuf_release,
-	.kmap = i915_gem_dmabuf_kmap,
-	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
-	.kunmap = i915_gem_dmabuf_kunmap,
-	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+	.map = i915_gem_dmabuf_kmap,
+	.map_atomic = i915_gem_dmabuf_kmap_atomic,
+	.unmap = i915_gem_dmabuf_kunmap,
+	.unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
 	.mmap = i915_gem_dmabuf_mmap,
 	.vmap = i915_gem_dmabuf_vmap,
 	.vunmap = i915_gem_dmabuf_vunmap,

View File

@@ -129,10 +129,10 @@ static const struct dma_buf_ops mock_dmabuf_ops = {
 	.map_dma_buf = mock_map_dma_buf,
 	.unmap_dma_buf = mock_unmap_dma_buf,
 	.release = mock_dmabuf_release,
-	.kmap = mock_dmabuf_kmap,
-	.kmap_atomic = mock_dmabuf_kmap_atomic,
-	.kunmap = mock_dmabuf_kunmap,
-	.kunmap_atomic = mock_dmabuf_kunmap_atomic,
+	.map = mock_dmabuf_kmap,
+	.map_atomic = mock_dmabuf_kmap_atomic,
+	.unmap = mock_dmabuf_kunmap,
+	.unmap_atomic = mock_dmabuf_kunmap_atomic,
 	.mmap = mock_dmabuf_mmap,
 	.vmap = mock_dmabuf_vmap,
 	.vunmap = mock_dmabuf_vunmap,

View File

@@ -160,10 +160,10 @@ static struct dma_buf_ops omap_dmabuf_ops = {
 	.release = omap_gem_dmabuf_release,
 	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
 	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
-	.kmap_atomic = omap_gem_dmabuf_kmap_atomic,
-	.kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
-	.kmap = omap_gem_dmabuf_kmap,
-	.kunmap = omap_gem_dmabuf_kunmap,
+	.map_atomic = omap_gem_dmabuf_kmap_atomic,
+	.unmap_atomic = omap_gem_dmabuf_kunmap_atomic,
+	.map = omap_gem_dmabuf_kmap,
+	.unmap = omap_gem_dmabuf_kunmap,
 	.mmap = omap_gem_dmabuf_mmap,
 };

View File

@@ -619,10 +619,10 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
 	.map_dma_buf = tegra_gem_prime_map_dma_buf,
 	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
 	.release = tegra_gem_prime_release,
-	.kmap_atomic = tegra_gem_prime_kmap_atomic,
-	.kunmap_atomic = tegra_gem_prime_kunmap_atomic,
-	.kmap = tegra_gem_prime_kmap,
-	.kunmap = tegra_gem_prime_kunmap,
+	.map_atomic = tegra_gem_prime_kmap_atomic,
+	.unmap_atomic = tegra_gem_prime_kunmap_atomic,
+	.map = tegra_gem_prime_kmap,
+	.unmap = tegra_gem_prime_kunmap,
 	.mmap = tegra_gem_prime_mmap,
 	.vmap = tegra_gem_prime_vmap,
 	.vunmap = tegra_gem_prime_vunmap,

View File

@@ -191,10 +191,10 @@ static struct dma_buf_ops udl_dmabuf_ops = {
 	.detach = udl_detach_dma_buf,
 	.map_dma_buf = udl_map_dma_buf,
 	.unmap_dma_buf = udl_unmap_dma_buf,
-	.kmap = udl_dmabuf_kmap,
-	.kmap_atomic = udl_dmabuf_kmap_atomic,
-	.kunmap = udl_dmabuf_kunmap,
-	.kunmap_atomic = udl_dmabuf_kunmap_atomic,
+	.map = udl_dmabuf_kmap,
+	.map_atomic = udl_dmabuf_kmap_atomic,
+	.unmap = udl_dmabuf_kunmap,
+	.unmap_atomic = udl_dmabuf_kunmap_atomic,
 	.mmap = udl_dmabuf_mmap,
 	.release = drm_gem_dmabuf_release,
 };

View File

@@ -108,10 +108,10 @@ const struct dma_buf_ops vmw_prime_dmabuf_ops = {
 	.map_dma_buf = vmw_prime_map_dma_buf,
 	.unmap_dma_buf = vmw_prime_unmap_dma_buf,
 	.release = NULL,
-	.kmap = vmw_prime_dmabuf_kmap,
-	.kmap_atomic = vmw_prime_dmabuf_kmap_atomic,
-	.kunmap = vmw_prime_dmabuf_kunmap,
-	.kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
+	.map = vmw_prime_dmabuf_kmap,
+	.map_atomic = vmw_prime_dmabuf_kmap_atomic,
+	.unmap = vmw_prime_dmabuf_kunmap,
+	.unmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
 	.mmap = vmw_prime_dmabuf_mmap,
 	.vmap = vmw_prime_dmabuf_vmap,
 	.vunmap = vmw_prime_dmabuf_vunmap,

View File

@@ -356,8 +356,8 @@ static struct dma_buf_ops vb2_dc_dmabuf_ops = {
 	.detach = vb2_dc_dmabuf_ops_detach,
 	.map_dma_buf = vb2_dc_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
-	.kmap = vb2_dc_dmabuf_ops_kmap,
-	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
+	.map = vb2_dc_dmabuf_ops_kmap,
+	.map_atomic = vb2_dc_dmabuf_ops_kmap,
 	.vmap = vb2_dc_dmabuf_ops_vmap,
 	.mmap = vb2_dc_dmabuf_ops_mmap,
 	.release = vb2_dc_dmabuf_ops_release,

View File

@@ -504,8 +504,8 @@ static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
 	.detach = vb2_dma_sg_dmabuf_ops_detach,
 	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
-	.kmap = vb2_dma_sg_dmabuf_ops_kmap,
-	.kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap,
+	.map = vb2_dma_sg_dmabuf_ops_kmap,
+	.map_atomic = vb2_dma_sg_dmabuf_ops_kmap,
 	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
 	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
 	.release = vb2_dma_sg_dmabuf_ops_release,

View File

@@ -342,8 +342,8 @@ static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
 	.detach = vb2_vmalloc_dmabuf_ops_detach,
 	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
-	.kmap = vb2_vmalloc_dmabuf_ops_kmap,
-	.kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
+	.map = vb2_vmalloc_dmabuf_ops_kmap,
+	.map_atomic = vb2_vmalloc_dmabuf_ops_kmap,
 	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
 	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
 	.release = vb2_vmalloc_dmabuf_ops_release,

View File

@@ -1020,10 +1020,10 @@ static const struct dma_buf_ops dma_buf_ops = {
 	.release = ion_dma_buf_release,
 	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
 	.end_cpu_access = ion_dma_buf_end_cpu_access,
-	.kmap_atomic = ion_dma_buf_kmap,
-	.kunmap_atomic = ion_dma_buf_kunmap,
-	.kmap = ion_dma_buf_kmap,
-	.kunmap = ion_dma_buf_kunmap,
+	.map_atomic = ion_dma_buf_kmap,
+	.unmap_atomic = ion_dma_buf_kunmap,
+	.map = ion_dma_buf_kmap,
+	.unmap = ion_dma_buf_kunmap,
 };
 
 struct dma_buf *ion_share_dma_buf(struct ion_client *client,

View File

@@ -39,13 +39,13 @@ struct dma_buf_attachment;
 
 /**
  * struct dma_buf_ops - operations possible on struct dma_buf
- * @kmap_atomic: maps a page from the buffer into kernel address
+ * @map_atomic: maps a page from the buffer into kernel address
  *		 space, users may not block until the subsequent unmap call.
  *		 This callback must not sleep.
- * @kunmap_atomic: [optional] unmaps a atomically mapped page from the buffer.
+ * @unmap_atomic: [optional] unmaps a atomically mapped page from the buffer.
  *		   This Callback must not sleep.
- * @kmap: maps a page from the buffer into kernel address space.
- * @kunmap: [optional] unmaps a page from the buffer.
+ * @map: maps a page from the buffer into kernel address space.
+ * @unmap: [optional] unmaps a page from the buffer.
  * @vmap: [optional] creates a virtual mapping for the buffer into kernel
  *	  address space. Same restrictions as for vmap and friends apply.
  * @vunmap: [optional] unmaps a vmap from the buffer
@@ -206,10 +206,10 @@ struct dma_buf_ops {
 	 * to be restarted.
 	 */
 	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
-	void *(*kmap_atomic)(struct dma_buf *, unsigned long);
-	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
-	void *(*kmap)(struct dma_buf *, unsigned long);
-	void (*kunmap)(struct dma_buf *, unsigned long, void *);
+	void *(*map_atomic)(struct dma_buf *, unsigned long);
+	void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
+	void *(*map)(struct dma_buf *, unsigned long);
+	void (*unmap)(struct dma_buf *, unsigned long, void *);
 
 	/**
 	 * @mmap: