drm/prime: Align gem_prime_export with obj_funcs.export
The idea is that gem_prime_export is deprecated in favor of
obj_funcs.export. That's much easier to do if both have matching
function signatures.

Reviewed-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Emil Velikov <emil.velikov@collabora.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Maxime Ripard <maxime.ripard@bootlin.com>
Cc: Sean Paul <sean@poorly.run>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Cc: Zhi Wang <zhi.a.wang@intel.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Tomi Valkeinen <tomi.valkeinen@ti.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: "David (ChunMing) Zhou" <David1.Zhou@amd.com>
Cc: Thierry Reding <thierry.reding@gmail.com>
Cc: Jonathan Hunter <jonathanh@nvidia.com>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Eric Anholt <eric@anholt.net>
Cc: "Michel Dänzer" <michel.daenzer@amd.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Huang Rui <ray.huang@amd.com>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Hawking Zhang <Hawking.Zhang@amd.com>
Cc: Feifei Xu <Feifei.Xu@amd.com>
Cc: Jim Qu <Jim.Qu@amd.com>
Cc: Evan Quan <evan.quan@amd.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Thomas Zimmermann <tdz@users.sourceforge.net>
Cc: Kate Stewart <kstewart@linuxfoundation.org>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Jilayne Lovejoy <opensource@jilayne.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Mikulas Patocka <mpatocka@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Junwei Zhang <Jerry.Zhang@amd.com>
Cc: intel-gvt-dev@lists.freedesktop.org
Cc: intel-gfx@lists.freedesktop.org
Cc: amd-gfx@lists.freedesktop.org
Cc: linux-tegra@vger.kernel.org
Link: https://patchwork.freedesktop.org/patch/msgid/20190614203615.12639-10-daniel.vetter@ffwll.ch
parent 8b3026a74f
commit e4fa8457b2
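
The signature change itself is small: the struct drm_device * parameter is dropped and callees derive the device from obj->dev instead. As a rough illustration only (a hypothetical "foo" driver, not part of this patch), the same function can now serve as both the per-object export hook and the older per-driver hook, which is what makes the deprecation easy:

#include <linux/dma-buf.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

/* Hypothetical "foo" driver hook using the new, aligned signature. */
static struct dma_buf *foo_gem_prime_export(struct drm_gem_object *obj,
					    int flags)
{
	/*
	 * The drm_device is reachable as obj->dev, so the extra dev
	 * parameter the old signature carried is no longer needed.
	 */
	return drm_gem_prime_export(obj, flags);
}

/* With matching signatures the same function satisfies both hooks. */
static const struct drm_gem_object_funcs foo_gem_object_funcs = {
	.export = foo_gem_prime_export,
};

static struct drm_driver foo_drm_driver = {
	.gem_prime_export = foo_gem_prime_export,	/* deprecated path */
};
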
@@ -345,8 +345,7 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
  * Returns:
  * Shared DMA buffer representing the GEM BO from the given device.
  */
-struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
-					struct drm_gem_object *gobj,
+struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
 					int flags)
 {
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
@@ -356,9 +355,9 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
 		return ERR_PTR(-EPERM);
 
-	buf = drm_gem_prime_export(dev, gobj, flags);
+	buf = drm_gem_prime_export(gobj, flags);
 	if (!IS_ERR(buf)) {
-		buf->file->f_mapping = dev->anon_inode->i_mapping;
+		buf->file->f_mapping = gobj->dev->anon_inode->i_mapping;
 		buf->ops = &amdgpu_dmabuf_ops;
 	}
 
@@ -30,8 +30,7 @@ struct drm_gem_object *
 amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 				 struct dma_buf_attachment *attach,
 				 struct sg_table *sg);
-struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
-					struct drm_gem_object *gobj,
+struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
 					int flags);
 struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
 					       struct dma_buf *dma_buf);
@@ -485,8 +485,7 @@ static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
 };
 
 struct dma_buf *
-armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
-	int flags)
+armada_gem_prime_export(struct drm_gem_object *obj, int flags)
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
@@ -495,7 +494,7 @@ armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
 	exp_info.flags = O_RDWR;
 	exp_info.priv = obj;
 
-	return drm_gem_dmabuf_export(dev, &exp_info);
+	return drm_gem_dmabuf_export(obj->dev, &exp_info);
 }
 
 struct drm_gem_object *
@@ -35,8 +35,7 @@ struct armada_gem_object *armada_gem_alloc_private_object(struct drm_device *,
 	size_t);
 int armada_gem_dumb_create(struct drm_file *, struct drm_device *,
 	struct drm_mode_create_dumb *);
-struct dma_buf *armada_gem_prime_export(struct drm_device *dev,
-	struct drm_gem_object *obj, int flags);
+struct dma_buf *armada_gem_prime_export(struct drm_gem_object *obj, int flags);
 struct drm_gem_object *armada_gem_prime_import(struct drm_device *,
 	struct dma_buf *);
 int armada_gem_map_import(struct armada_gem_object *);
@@ -384,9 +384,9 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
 	if (obj->funcs && obj->funcs->export)
 		dmabuf = obj->funcs->export(obj, flags);
 	else if (dev->driver->gem_prime_export)
-		dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+		dmabuf = dev->driver->gem_prime_export(obj, flags);
 	else
-		dmabuf = drm_gem_prime_export(dev, obj, flags);
+		dmabuf = drm_gem_prime_export(obj, flags);
 	if (IS_ERR(dmabuf)) {
 		/* normally the created dma-buf takes ownership of the ref,
 		 * but if that fails then drop the ref
@@ -814,7 +814,6 @@ EXPORT_SYMBOL(drm_prime_pages_to_sg);
 
 /**
  * drm_gem_prime_export - helper library implementation of the export callback
- * @dev: drm_device to export from
  * @obj: GEM object to export
  * @flags: flags like DRM_CLOEXEC and DRM_RDWR
  *
@@ -822,10 +821,10 @@ EXPORT_SYMBOL(drm_prime_pages_to_sg);
  * using the PRIME helpers. It is used as the default in
  * drm_gem_prime_handle_to_fd().
  */
-struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
-				     struct drm_gem_object *obj,
+struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
 				     int flags)
 {
+	struct drm_device *dev = obj->dev;
 	struct dma_buf_export_info exp_info = {
 		.exp_name = KBUILD_MODNAME, /* white lie for debug */
 		.owner = dev->driver->fops->owner,
@@ -491,7 +491,7 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
 
 	obj->gvt_info = dmabuf_obj->info;
 
-	dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
+	dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
 	if (IS_ERR(dmabuf)) {
 		gvt_vgpu_err("export dma-buf failed\n");
 		ret = PTR_ERR(dmabuf);
@@ -3085,8 +3085,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 				struct dma_buf *dma_buf);
 
-struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
-				struct drm_gem_object *gem_obj, int flags);
+struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);
 
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
@@ -224,8 +224,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
 	.end_cpu_access = i915_gem_end_cpu_access,
 };
 
-struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
-				      struct drm_gem_object *gem_obj, int flags)
+struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -242,7 +241,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 		return ERR_PTR(ret);
 	}
 
-	return drm_gem_dmabuf_export(dev, &exp_info);
+	return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
 }
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -37,7 +37,7 @@ static int igt_dmabuf_export(void *arg)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+	dmabuf = i915_gem_prime_export(&obj->base, 0);
 	i915_gem_object_put(obj);
 	if (IS_ERR(dmabuf)) {
 		pr_err("i915_gem_prime_export failed with err=%d\n",
@@ -61,7 +61,7 @@ static int igt_dmabuf_import_self(void *arg)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+	dmabuf = i915_gem_prime_export(&obj->base, 0);
 	if (IS_ERR(dmabuf)) {
 		pr_err("i915_gem_prime_export failed with err=%d\n",
 		       (int)PTR_ERR(dmabuf));
@@ -236,7 +236,7 @@ static int igt_dmabuf_export_vmap(void *arg)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+	dmabuf = i915_gem_prime_export(&obj->base, 0);
 	if (IS_ERR(dmabuf)) {
 		pr_err("i915_gem_prime_export failed with err=%d\n",
 		       (int)PTR_ERR(dmabuf));
@@ -283,7 +283,7 @@ static int igt_dmabuf_export_kmap(void *arg)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+	dmabuf = i915_gem_prime_export(&obj->base, 0);
 	i915_gem_object_put(obj);
 	if (IS_ERR(dmabuf)) {
 		err = PTR_ERR(dmabuf);
@@ -76,8 +76,7 @@ u64 omap_gem_mmap_offset(struct drm_gem_object *obj);
 size_t omap_gem_mmap_size(struct drm_gem_object *obj);
 
 /* PRIME Interface */
-struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
-		struct drm_gem_object *obj, int flags);
+struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags);
 struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
 		struct dma_buf *buffer);
 
@@ -136,8 +136,7 @@ static const struct dma_buf_ops omap_dmabuf_ops = {
 	.mmap = omap_gem_dmabuf_mmap,
 };
 
-struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
-		struct drm_gem_object *obj, int flags)
+struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags)
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
@@ -146,7 +145,7 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
 	exp_info.flags = flags;
 	exp_info.priv = obj;
 
-	return drm_gem_dmabuf_export(dev, &exp_info);
+	return drm_gem_dmabuf_export(obj->dev, &exp_info);
 }
 
 /* -----------------------------------------------------------------------------
@@ -130,8 +130,7 @@ int radeon_gem_object_open(struct drm_gem_object *obj,
 				struct drm_file *file_priv);
 void radeon_gem_object_close(struct drm_gem_object *obj,
 				struct drm_file *file_priv);
-struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
-					struct drm_gem_object *gobj,
+struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
 					int flags);
 extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc,
 				      unsigned int flags, int *vpos, int *hpos,
@@ -124,12 +124,11 @@ struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj)
 	return bo->tbo.resv;
 }
 
-struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
-					struct drm_gem_object *gobj,
+struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
 					int flags)
 {
 	struct radeon_bo *bo = gem_to_radeon_bo(gobj);
 	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
 		return ERR_PTR(-EPERM);
-	return drm_gem_prime_export(dev, gobj, flags);
+	return drm_gem_prime_export(gobj, flags);
 }
@@ -629,20 +629,19 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
 	.vunmap = tegra_gem_prime_vunmap,
 };
 
-struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
-				       struct drm_gem_object *gem,
+struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
 				       int flags)
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
 	exp_info.exp_name = KBUILD_MODNAME;
-	exp_info.owner = drm->driver->fops->owner;
+	exp_info.owner = gem->dev->driver->fops->owner;
 	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
 	exp_info.size = gem->size;
 	exp_info.flags = flags;
 	exp_info.priv = gem;
 
-	return drm_gem_dmabuf_export(drm, &exp_info);
+	return drm_gem_dmabuf_export(gem->dev, &exp_info);
 }
 
 struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
@@ -73,8 +73,7 @@ extern const struct vm_operations_struct tegra_bo_vm_ops;
 int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma);
 int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
 
-struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
-				       struct drm_gem_object *gem,
+struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
 				       int flags);
 struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
 					      struct dma_buf *buf);
@@ -170,8 +170,7 @@ static const struct dma_buf_ops udl_dmabuf_ops = {
 	.release = drm_gem_dmabuf_release,
 };
 
-struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
-				     struct drm_gem_object *obj, int flags)
+struct dma_buf *udl_gem_prime_export(struct drm_gem_object *obj, int flags)
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
@@ -180,7 +179,7 @@ struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
 	exp_info.flags = flags;
 	exp_info.priv = obj;
 
-	return drm_gem_dmabuf_export(dev, &exp_info);
+	return drm_gem_dmabuf_export(obj->dev, &exp_info);
 }
 
 static int udl_prime_create(struct drm_device *dev,
@@ -126,8 +126,7 @@ int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
 void udl_gem_free_object(struct drm_gem_object *gem_obj);
 struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
 					    size_t size);
-struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
-				     struct drm_gem_object *obj, int flags);
+struct dma_buf *udl_gem_prime_export(struct drm_gem_object *obj, int flags);
 struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
 					    struct dma_buf *dma_buf);
 
@@ -658,8 +658,7 @@ static void vc4_bo_cache_time_timer(struct timer_list *t)
 	schedule_work(&vc4->bo_cache.time_work);
 }
 
-struct dma_buf *
-vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
+struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags)
 {
 	struct vc4_bo *bo = to_vc4_bo(obj);
 	struct dma_buf *dmabuf;
@@ -681,7 +680,7 @@ vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
 		return ERR_PTR(ret);
 	}
 
-	dmabuf = drm_gem_prime_export(dev, obj, flags);
+	dmabuf = drm_gem_prime_export(obj, flags);
 	if (IS_ERR(dmabuf))
 		vc4_bo_dec_usecnt(bo);
 
@@ -708,8 +708,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
 int vc4_dumb_create(struct drm_file *file_priv,
 		    struct drm_device *dev,
 		    struct drm_mode_create_dumb *args);
-struct dma_buf *vc4_prime_export(struct drm_device *dev,
-				 struct drm_gem_object *obj, int flags);
+struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags);
 int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
@@ -108,7 +108,7 @@ static int attach_dmabuf(struct drm_device *dev,
 	if (obj->dma_buf)
 		return 0;
 
-	dmabuf = dev->driver->gem_prime_export(dev, obj, 0);
+	dmabuf = dev->driver->gem_prime_export(obj, 0);
 	if (IS_ERR(dmabuf))
 		return PTR_ERR(dmabuf);
 
@@ -581,8 +581,8 @@ struct drm_driver {
 	 * Export hook for GEM drivers. Deprecated in favour of
 	 * &drm_gem_object_funcs.export.
 	 */
-	struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
-				struct drm_gem_object *obj, int flags);
+	struct dma_buf * (*gem_prime_export)(struct drm_gem_object *obj,
+					     int flags);
 	/**
 	 * @gem_prime_import:
 	 *
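
As the kerneldoc above notes, this per-driver hook is deprecated in favour of &drm_gem_object_funcs.export. A rough sketch of that per-object wiring (hypothetical "foo" driver names, not from this patch); with the aligned signature, drm_gem_prime_export() itself can now be plugged in directly as the hook:

#include <linux/err.h>
#include <linux/slab.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

/* Hypothetical per-object hooks; .export uses the aligned signature. */
static const struct drm_gem_object_funcs foo_gem_funcs = {
	.export = drm_gem_prime_export,
};

static struct drm_gem_object *foo_gem_create(struct drm_device *dev,
					     size_t size)
{
	struct drm_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	/*
	 * With obj->funcs set, the PRIME core calls obj->funcs->export()
	 * and never falls back to drm_driver.gem_prime_export.
	 */
	obj->funcs = &foo_gem_funcs;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	return obj;
}
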
@@ -91,8 +91,7 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
 
 struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
-struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
-				     struct drm_gem_object *obj,
+struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
 				     int flags);
 
 /* helper functions for importing */