2018-09-11 03:27:58 +08:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0 */
|
|
|
|
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
|
|
|
|
|
|
|
|
#ifndef __PANFROST_GEM_H__
|
|
|
|
#define __PANFROST_GEM_H__
|
|
|
|
|
|
|
|
#include <drm/drm_gem_shmem_helper.h>
|
|
|
|
#include <drm/drm_mm.h>
|
|
|
|
|
2019-08-13 23:01:15 +08:00
|
|
|
struct panfrost_mmu;
|
|
|
|
|
2018-09-11 03:27:58 +08:00
|
|
|
/* Driver-private GEM buffer object, embedding the shmem helper object. */
struct panfrost_gem_object {
	struct drm_gem_shmem_object base;

	/* Per-page-chunk sg_tables for heap BOs; NULL otherwise.
	 * NOTE(review): allocation site not visible in this header — confirm. */
	struct sg_table *sgts;

	/*
	 * Use a list for now. If searching a mapping ever becomes the
	 * bottleneck, we should consider using an RB-tree, or even better,
	 * let the core store drm_gem_object_mapping entries (where we
	 * could place driver specific data) instead of drm_gem_object ones
	 * in its drm_file->object_idr table.
	 *
	 * struct drm_gem_object_mapping {
	 *	struct drm_gem_object *obj;
	 *	void *driver_priv;
	 * };
	 */
	struct {
		struct list_head list;
		/* Protects the mappings list above. */
		struct mutex lock;
	} mappings;

	/*
	 * Count the number of jobs referencing this BO so we don't let the
	 * shrinker reclaim this object prematurely.
	 */
	atomic_t gpu_usecount;

	/* BO must never be mapped executable on the GPU. */
	bool noexec		:1;
	/* Heap BO: presumably grown on demand via sgts — confirm at fault path. */
	bool is_heap		:1;
};
|
|
|
|
|
2020-01-16 10:15:54 +08:00
|
|
|
/*
 * One GPU VA-space mapping of a panfrost_gem_object. Refcounted; looked up
 * per file via panfrost_gem_mapping_get() and released with
 * panfrost_gem_mapping_put().
 */
struct panfrost_gem_mapping {
	/* Entry in panfrost_gem_object::mappings.list. */
	struct list_head node;
	struct kref refcount;
	/* The BO this mapping refers to. */
	struct panfrost_gem_object *obj;
	/* GPU VA range reserved for this mapping in the MMU's drm_mm. */
	struct drm_mm_node mmnode;
	/* Address space the mapping belongs to. */
	struct panfrost_mmu *mmu;
	/* NOTE(review): presumably set once the GPU page tables are populated
	 * — confirm against the MMU map/unmap code. */
	bool active		:1;
};
|
|
|
|
|
2018-09-11 03:27:58 +08:00
|
|
|
static inline
|
|
|
|
struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
|
|
|
|
{
|
|
|
|
return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
|
|
|
|
}
|
|
|
|
|
2020-01-16 10:15:54 +08:00
|
|
|
static inline struct panfrost_gem_mapping *
|
|
|
|
drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
|
2019-07-27 06:09:43 +08:00
|
|
|
{
|
2020-01-16 10:15:54 +08:00
|
|
|
return container_of(node, struct panfrost_gem_mapping, mmnode);
|
2019-07-27 06:09:43 +08:00
|
|
|
}
|
|
|
|
|
2018-09-11 03:27:58 +08:00
|
|
|
/* BO allocation and dma-buf import entry points (presumably installed as
 * drm_driver callbacks — confirm against the .c file). */
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);

struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sgt);

/* Allocate a BO and create a userspace handle for it in one call. */
struct panfrost_gem_object *
panfrost_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *dev, size_t size,
				u32 flags,
				uint32_t *handle);

/* Per-file open/close hooks for a GEM object. */
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
void panfrost_gem_close(struct drm_gem_object *obj,
			struct drm_file *file_priv);

/* Look up (taking a reference) / release a per-file GPU VA mapping of @bo;
 * teardown drops all of a BO's mappings. */
struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
			 struct panfrost_file_priv *priv);
void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);

/* Register/unregister the memory shrinker for this device. */
void panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
2018-09-11 03:27:58 +08:00
|
|
|
#endif /* __PANFROST_GEM_H__ */
|