/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#ifndef __ETNAVIV_GPU_H__
#define __ETNAVIV_GPU_H__

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_drv.h"

struct etnaviv_gem_submit;
struct etnaviv_vram_mapping;

struct etnaviv_chip_identity {
	u32 model;
	u32 revision;
	u32 product_id;
	u32 customer_id;
	u32 eco_id;

	/* Supported feature fields. */
	u32 features;

	/* Supported minor feature fields. */
	u32 minor_features0;
	u32 minor_features1;
	u32 minor_features2;
	u32 minor_features3;
	u32 minor_features4;
	u32 minor_features5;
	u32 minor_features6;
	u32 minor_features7;
	u32 minor_features8;
	u32 minor_features9;
	u32 minor_features10;
	u32 minor_features11;

	/* Number of streams supported. */
	u32 stream_count;

	/* Total number of temporary registers per thread. */
	u32 register_max;

	/* Maximum number of threads. */
	u32 thread_count;

	/* Number of shader cores. */
	u32 shader_core_count;

	/* Size of the vertex cache. */
	u32 vertex_cache_size;

	/* Number of entries in the vertex output buffer. */
	u32 vertex_output_buffer_size;

	/* Number of pixel pipes. */
	u32 pixel_pipes;

	/* Number of instructions. */
	u32 instruction_count;

	/* Number of constants. */
	u32 num_constants;

	/* Buffer size */
	u32 buffer_size;

	/* Number of varyings */
	u8 varyings_count;
};
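
/*
 * Illustrative use of the identity block (a sketch, not part of the
 * original header): the feature words mirror the hardware feature
 * registers and are tested against the bit definitions generated into
 * common.xml.h, along the lines of:
 *
 *	if (gpu->identity.features & chipFeatures_PIPE_3D)
 *		...core has a 3D pipe...
 */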

enum etnaviv_sec_mode {
	ETNA_SEC_NONE = 0,
	ETNA_SEC_KERNEL,
	ETNA_SEC_TZ
};

struct etnaviv_event {
	struct dma_fence *fence;
	struct etnaviv_gem_submit *submit;

	void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
};

struct etnaviv_cmdbuf_suballoc;
struct regulator;
struct clk;

#define ETNA_NR_EVENTS 30

struct etnaviv_gpu {
	struct drm_device *drm;
	struct thermal_cooling_device *cooling;
	struct device *dev;
	struct mutex lock;
	struct etnaviv_chip_identity identity;
	enum etnaviv_sec_mode sec_mode;
	struct workqueue_struct *wq;
	struct drm_gpu_scheduler sched;
	bool initialized;
	bool fe_running;

	/* 'ring'-buffer: */
	struct etnaviv_cmdbuf buffer;
	int exec_state;

	/* event management: */
	DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
	struct etnaviv_event event[ETNA_NR_EVENTS];
	struct completion event_free;
	spinlock_t event_spinlock;

	u32 idle_mask;

	/* Fencing support */
	struct mutex fence_lock;
	struct idr fence_idr;
	u32 next_fence;
	u32 completed_fence;
	wait_queue_head_t fence_event;
	u64 fence_context;
	spinlock_t fence_spinlock;

	/* worker for handling 'sync' points: */
	struct work_struct sync_point_work;
	int sync_point_event;

	/* hang detection */
	u32 hangcheck_dma_addr;
	u32 hangcheck_fence;

	void __iomem *mmio;
	int irq;

	struct etnaviv_iommu_context *mmu_context;
	unsigned int flush_seq;

	/* Power Control: */
	struct clk *clk_bus;
	struct clk *clk_reg;
	struct clk *clk_core;
	struct clk *clk_shader;

	unsigned int freq_scale;
	unsigned long base_rate_core;
	unsigned long base_rate_shader;
};
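
/*
 * The fence sequence numbers above are 32-bit and may wrap. A sketch of
 * the usual wraparound-safe ordering test (the helper name is
 * hypothetical, shown for illustration only):
 */
static inline bool example_fence_after(u32 a, u32 b)
{
	/* Signed subtraction stays correct across u32 wraparound. */
	return (s32)(a - b) > 0;
}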

static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
	writel(data, gpu->mmio + reg);
}

static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
{
	return readl(gpu->mmio + reg);
}
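
/*
 * A read-modify-write helper composed from the two accessors above
 * (an illustrative sketch; the gpu_rmw() name is hypothetical and not
 * part of the original header):
 */
static inline void gpu_rmw(struct etnaviv_gpu *gpu, u32 reg, u32 mask, u32 bits)
{
	/* Clear the masked field, then set the requested bits. */
	u32 val = gpu_read(gpu, reg) & ~mask;

	gpu_write(gpu, reg, val | bits);
}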

int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);

int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu);

#ifdef CONFIG_DEBUG_FS
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
#endif

void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu);
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct drm_etnaviv_timespec *timeout);
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj,
	struct drm_etnaviv_timespec *timeout);
struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch);
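
/*
 * Typical call pattern (an illustrative sketch, not a function in this
 * header): register access and FE kick-off happen with a runtime PM
 * reference held, bracketed by the get/put pair above:
 *
 *	int ret = etnaviv_gpu_pm_get_sync(gpu);
 *	if (ret < 0)
 *		return ret;
 *	...program registers via gpu_write(), start the FE...
 *	etnaviv_gpu_pm_put(gpu);
 */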

extern struct platform_driver etnaviv_gpu_driver;

#endif /* __ETNAVIV_GPU_H__ */