/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#ifndef __ETNAVIV_MMU_H__
#define __ETNAVIV_MMU_H__
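
/* Protection flags for GPU mappings, passed to the MMU map operations below. */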
#define ETNAVIV_PROT_READ (1 << 0)
#define ETNAVIV_PROT_WRITE (1 << 1)
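
/*
 * Revisions of the Vivante MMU handled by the driver; which one is in use
 * is normally determined from the GPU core's feature bits at probe time.
 */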
enum etnaviv_iommu_version {
	ETNAVIV_IOMMU_V1 = 0,
	ETNAVIV_IOMMU_V2,
};

struct etnaviv_gpu;
struct etnaviv_vram_mapping;
struct etnaviv_iommu_global;
struct etnaviv_iommu_context;
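
/*
 * Per MMU version backend operations, implemented by etnaviv_iommuv1_ops
 * and etnaviv_iommuv2_ops below.
 */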
struct etnaviv_iommu_ops {
	struct etnaviv_iommu_context *(*init)(struct etnaviv_iommu_global *);
	void (*free)(struct etnaviv_iommu_context *);
	int (*map)(struct etnaviv_iommu_context *context, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	size_t (*unmap)(struct etnaviv_iommu_context *context, unsigned long iova,
			size_t size);
	size_t (*dump_size)(struct etnaviv_iommu_context *);
	void (*dump)(struct etnaviv_iommu_context *, void *);
	void (*restore)(struct etnaviv_gpu *, struct etnaviv_iommu_context *);
};

extern const struct etnaviv_iommu_ops etnaviv_iommuv1_ops;
extern const struct etnaviv_iommu_ops etnaviv_iommuv2_ops;
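
/*
 * MMUv2 page table array (PTA): a 4 KiB table of 64-bit entries, giving
 * 512 slots, one per MMUv2 context.
 */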
#define ETNAVIV_PTA_SIZE	SZ_4K
#define ETNAVIV_PTA_ENTRIES	(ETNAVIV_PTA_SIZE / sizeof(u64))
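
/*
 * MMU state shared between all GPU cores of a device, independent of the
 * individual contexts.
 */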
struct etnaviv_iommu_global {
	struct device *dev;
	enum etnaviv_iommu_version version;
	const struct etnaviv_iommu_ops *ops;
	unsigned int use;
	struct mutex lock;

	void *bad_page_cpu;
	dma_addr_t bad_page_dma;

	u32 memory_base;

	/*
	 * This union holds members needed by either MMUv1 or MMUv2, which
	 * can not exist at the same time.
	 */
	union {
		struct {
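			/*
			 * MMUv1 is not able to switch page tables without
			 * stopping the GPU, so a single context is shared
			 * between all clients and cores.
			 */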
			struct etnaviv_iommu_context *shared_context;
		} v1;
		struct {
			/* P(age) T(able) A(rray) */
			u64 *pta_cpu;
			dma_addr_t pta_dma;
			struct spinlock pta_lock;
			DECLARE_BITMAP(pta_alloc, ETNAVIV_PTA_ENTRIES);
		} v2;
	};
};
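
/*
 * An MMU context is one instance of GPU page tables plus the manager for
 * the GPU virtual address space on top of them. Contexts are reference
 * counted and can be shared between GPU cores with compatible MMU
 * implementations.
 */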
struct etnaviv_iommu_context {
	struct kref refcount;
	struct etnaviv_iommu_global *global;

	/* memory manager for GPU address area */
	struct mutex lock;
	struct list_head mappings;
	struct drm_mm mm;
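	/*
	 * Incremented whenever mappings in this context change, so users
	 * of the context can tell when a TLB flush is needed.
	 */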
	unsigned int flush_seq;

	/* Not part of the context, but needs to have the same lifetime */
	struct etnaviv_vram_mapping cmdbuf_mapping;
};

int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu);
void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu);

struct etnaviv_gem_object;
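
/* Map and unmap GEM objects into/out of a context's address space. */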
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping, u64 va);
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping);
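
/*
 * Map a physically contiguous suballocation (e.g. the command buffer
 * suballocator backing store) into a context, and drop such a mapping
 * again.
 */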
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *ctx,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size);
void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *ctx,
				   struct etnaviv_vram_mapping *mapping);

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *ctx);
void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
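
/*
 * Context lifecycle: etnaviv_iommu_context_init() creates a new context
 * (on MMUv1 the global shared context is returned instead), the get/put
 * helpers manage the reference count and etnaviv_iommu_restore() points a
 * GPU core at the page tables of a context.
 */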
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc);
static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
{
	kref_get(&ctx->refcount);
}
void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
			   struct etnaviv_iommu_context *ctx);
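
/* MMU version specific context constructors, used by etnaviv_iommu_context_init(). */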
struct etnaviv_iommu_context *
etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global);
struct etnaviv_iommu_context *
etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global);
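
/*
 * MMUv2 only: master TLB address and PTA slot of a context, needed when
 * setting up the MMU of a GPU core.
 */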
u32 etnaviv_iommuv2_get_mtlb_addr(struct etnaviv_iommu_context *context);
unsigned short etnaviv_iommuv2_get_pta_id(struct etnaviv_iommu_context *context);

#endif /* __ETNAVIV_MMU_H__ */