// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017-2018 Etnaviv Project
 */
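
/*
 * Command buffer suballocator: all kernel command buffers are carved out
 * of a single DMA-coherent, write-combined buffer in fixed-size granules,
 * so only one contiguous allocation has to be mapped into each MMU context.
 */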

#include <linux/dma-mapping.h>

#include <drm/drm_mm.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"

#define SUBALLOC_SIZE		SZ_512K
#define SUBALLOC_GRANULE	SZ_4K
#define SUBALLOC_GRANULES	(SUBALLOC_SIZE / SUBALLOC_GRANULE)
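
/*
 * Bookkeeping for one suballocated DMA buffer: a bitmap tracks which 4K
 * granules are in use, and allocators sleep on free_event when none are.
 */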
struct etnaviv_cmdbuf_suballoc {
	/* suballocated dma buffer properties */
	struct device *dev;
	void *vaddr;
	dma_addr_t paddr;

	/* allocation management */
	struct mutex lock;
	DECLARE_BITMAP(granule_map, SUBALLOC_GRANULES);
	int free_space;
	wait_queue_head_t free_event;
};
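
/*
 * Sketch of the typical call sequence (error handling omitted; "ctx" and
 * "mapping" stand in for the caller's etnaviv_iommu_context and
 * etnaviv_vram_mapping):
 *
 *	suballoc = etnaviv_cmdbuf_suballoc_new(dev);
 *	etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &mapping, memory_base);
 *	etnaviv_cmdbuf_init(suballoc, &cmdbuf, size);
 *	... emit commands at cmdbuf.vaddr, point the GPU at
 *	    etnaviv_cmdbuf_get_va(&cmdbuf, &mapping) ...
 *	etnaviv_cmdbuf_free(&cmdbuf);
 *	etnaviv_cmdbuf_suballoc_unmap(ctx, &mapping);
 *	etnaviv_cmdbuf_suballoc_destroy(suballoc);
 */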

struct etnaviv_cmdbuf_suballoc *
etnaviv_cmdbuf_suballoc_new(struct device *dev)
{
	struct etnaviv_cmdbuf_suballoc *suballoc;
	int ret;

	suballoc = kzalloc(sizeof(*suballoc), GFP_KERNEL);
	if (!suballoc)
		return ERR_PTR(-ENOMEM);

	suballoc->dev = dev;
	mutex_init(&suballoc->lock);
	init_waitqueue_head(&suballoc->free_event);

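	/* the suballoc region must fit below the softpin start address */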
	BUILD_BUG_ON(ETNAVIV_SOFTPIN_START_ADDRESS < SUBALLOC_SIZE);
	suballoc->vaddr = dma_alloc_wc(dev, SUBALLOC_SIZE,
				       &suballoc->paddr, GFP_KERNEL);
	if (!suballoc->vaddr) {
		ret = -ENOMEM;
		goto free_suballoc;
	}

	return suballoc;

free_suballoc:
	kfree(suballoc);

	return ERR_PTR(ret);
}
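
/*
 * Map the suballoc backing store into an MMU context, making every cmdbuf
 * carved out of it addressable by GPUs using that context.
 */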
int etnaviv_cmdbuf_suballoc_map(struct etnaviv_cmdbuf_suballoc *suballoc,
				struct etnaviv_iommu_context *context,
				struct etnaviv_vram_mapping *mapping,
				u32 memory_base)
{
	return etnaviv_iommu_get_suballoc_va(context, mapping, memory_base,
					     suballoc->paddr, SUBALLOC_SIZE);
}
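
/*
 * Undo a previous etnaviv_cmdbuf_suballoc_map() for the given context.
 */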
void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu_context *context,
				   struct etnaviv_vram_mapping *mapping)
{
	etnaviv_iommu_put_suballoc_va(context, mapping);
}
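
/*
 * Free the suballocator and its DMA backing store.
 */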
void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
{
	dma_free_wc(suballoc->dev, SUBALLOC_SIZE, suballoc->vaddr,
		    suballoc->paddr);
	kfree(suballoc);
}
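
/*
 * Carve a command buffer of the requested size out of the suballoc region,
 * waiting up to 10 seconds for space to be freed if the region is full.
 */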
int etnaviv_cmdbuf_init(struct etnaviv_cmdbuf_suballoc *suballoc,
			struct etnaviv_cmdbuf *cmdbuf, u32 size)
{
	int granule_offs, order, ret;

	cmdbuf->suballoc = suballoc;
	cmdbuf->size = size;

	order = order_base_2(ALIGN(size, SUBALLOC_GRANULE) / SUBALLOC_GRANULE);
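	/*
	 * bitmap_find_free_region() allocates power-of-two runs of granules,
	 * hence the order above; when no run is free, drop the lock, wait
	 * for a cmdbuf to be released and retry.
	 */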
retry:
	mutex_lock(&suballoc->lock);
	granule_offs = bitmap_find_free_region(suballoc->granule_map,
					       SUBALLOC_GRANULES, order);
	if (granule_offs < 0) {
		suballoc->free_space = 0;
		mutex_unlock(&suballoc->lock);
		ret = wait_event_interruptible_timeout(suballoc->free_event,
						       suballoc->free_space,
						       msecs_to_jiffies(10 * 1000));
		if (!ret) {
			dev_err(suballoc->dev,
				"Timeout waiting for cmdbuf space\n");
			return -ETIMEDOUT;
		}
		goto retry;
	}
	mutex_unlock(&suballoc->lock);
	cmdbuf->suballoc_offset = granule_offs * SUBALLOC_GRANULE;
	cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;

	return 0;
}
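
/*
 * Return a command buffer's granules to the bitmap and wake anyone waiting
 * for free space.
 */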
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
{
	struct etnaviv_cmdbuf_suballoc *suballoc = cmdbuf->suballoc;
	int order = order_base_2(ALIGN(cmdbuf->size, SUBALLOC_GRANULE) /
				 SUBALLOC_GRANULE);

	mutex_lock(&suballoc->lock);
	bitmap_release_region(suballoc->granule_map,
			      cmdbuf->suballoc_offset / SUBALLOC_GRANULE,
			      order);
	suballoc->free_space = 1;
	mutex_unlock(&suballoc->lock);
	wake_up_all(&suballoc->free_event);
}
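
/*
 * GPU virtual address of a command buffer within the given mapping of the
 * suballoc region.
 */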
u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf,
			  struct etnaviv_vram_mapping *mapping)
{
	return mapping->iova + buf->suballoc_offset;
}
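
/*
 * DMA bus address of a command buffer within the suballoc region.
 */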
dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf)
{
	return buf->suballoc->paddr + buf->suballoc_offset;
}