drm/nouveau: move io_reserve_lru handling into the driver v5

While working on TTM cleanups I found that the io_reserve_lru used by
Nouveau is not actually working at all.

In general we should remove driver-specific handling from the common memory
management code, so this patch moves the io_reserve_lru handling into
Nouveau instead.

v2: don't call ttm_bo_unmap_virtual in nouveau_ttm_io_mem_reserve
v3: rebased and use both base and offset in the check
v4: fix small typos and test the patch
v5: rebased and keep the mem.bus init in TTM.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Link: https://patchwork.freedesktop.org/patch/388643/
Christian König, 2020-08-21 16:06:50 +02:00
commit 141b15e591 (parent d575ee1131)
4 changed files with 127 additions and 23 deletions
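
The core of the change is the retry loop in nouveau_ttm_io_mem_reserve(): when
mapping an I/O window fails with -ENOSPC, the least recently used buffer on
drm->ttm.io_reserve_lru is unmapped and the reservation is retried. The sketch
below is a condensed, self-contained model of that pattern, not the kernel
code; every name in it (struct buffer, map_window, unmap_window, lru_remove,
io_mem_reserve, NUM_WINDOWS) is a hypothetical stand-in for the driver
structures shown in the diff.

/*
 * Minimal model of the io_reserve_lru pattern this patch moves into
 * Nouveau.  "Windows" stand in for the limited BAR aperture and the
 * lru[] array stands in for drm->ttm.io_reserve_lru (oldest first).
 * Hypothetical names throughout; builds with any C99 compiler.
 */
#include <errno.h>
#include <stdio.h>

#define NUM_WINDOWS 2
#define MAX_BUFFERS 8

struct buffer {
	const char *name;
	int window;				/* -1 while unmapped */
};

static struct buffer *windows[NUM_WINDOWS];	/* the scarce resource */
static struct buffer *lru[MAX_BUFFERS];		/* mapped buffers, oldest first */
static int lru_len;

static int map_window(struct buffer *bo)	/* plays the role of the NVIF map */
{
	for (int i = 0; i < NUM_WINDOWS; i++) {
		if (!windows[i]) {
			windows[i] = bo;
			bo->window = i;
			lru[lru_len++] = bo;
			return 0;
		}
	}
	return -ENOSPC;				/* aperture exhausted */
}

static void unmap_window(struct buffer *bo)	/* like io_mem_free_locked */
{
	if (bo->window >= 0) {
		windows[bo->window] = NULL;
		bo->window = -1;
	}
}

static void lru_remove(struct buffer *bo)	/* like del_io_reserve_lru */
{
	for (int i = 0; i < lru_len; i++) {
		if (lru[i] != bo)
			continue;
		for (; i < lru_len - 1; i++)
			lru[i] = lru[i + 1];
		lru_len--;
		return;
	}
}

/* The retry pattern: on -ENOSPC evict the least recently used mapping
 * and try again until the reservation succeeds or nothing is left. */
static int io_mem_reserve(struct buffer *bo)
{
	int ret;
retry:
	ret = map_window(bo);
	if (ret == -ENOSPC && lru_len) {
		struct buffer *victim = lru[0];
		lru_remove(victim);
		unmap_window(victim);	/* the real code also zaps CPU mappings */
		goto retry;
	}
	return ret;
}

int main(void)
{
	struct buffer a = { "a", -1 }, b = { "b", -1 }, c = { "c", -1 };

	io_mem_reserve(&a);
	io_mem_reserve(&b);
	io_mem_reserve(&c);	/* evicts "a" to make room */
	printf("a mapped: %d, b mapped: %d, c mapped: %d\n",
	       a.window >= 0, b.window >= 0, c.window >= 0);
	return 0;
}

In the real patch the whole loop runs under drm->ttm.io_reserve_mutex,
eviction additionally calls drm_vma_node_unmap() so stale CPU mappings fault
back in through nouveau_ttm_fault(), and the fault handler keeps the list
ordered by deleting a buffer from the LRU before faulting and re-adding it at
the tail afterwards.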

drivers/gpu/drm/nouveau/nouveau_bo.c

@@ -137,6 +137,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
struct nouveau_bo *nvbo = nouveau_bo(bo);
WARN_ON(nvbo->pin_refcnt > 0);
nouveau_bo_del_io_reserve_lru(bo);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
/*
@@ -304,6 +305,7 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, flags, 0);
INIT_LIST_HEAD(&nvbo->io_reserve_lru);
ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
&nvbo->placement, align >> PAGE_SHIFT, false,
@@ -574,6 +576,26 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
PAGE_SIZE, DMA_FROM_DEVICE);
}
void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
mutex_lock(&drm->ttm.io_reserve_mutex);
list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
mutex_unlock(&drm->ttm.io_reserve_mutex);
}
void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
mutex_lock(&drm->ttm.io_reserve_mutex);
list_del_init(&nvbo->io_reserve_lru);
mutex_unlock(&drm->ttm.io_reserve_mutex);
}
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
bool no_wait_gpu)
@@ -888,6 +910,8 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
if (bo->destroy != nouveau_bo_del_ttm)
return;
nouveau_bo_del_io_reserve_lru(bo);
if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
mem->mem.page == nvbo->page) {
list_for_each_entry(vma, &nvbo->vma_list, head) {
@@ -1018,17 +1042,42 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
filp->private_data);
}
static void
nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
struct ttm_resource *reg)
{
struct nouveau_mem *mem = nouveau_mem(reg);
if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
switch (reg->mem_type) {
case TTM_PL_TT:
if (mem->kind)
nvif_object_unmap_handle(&mem->mem.object);
break;
case TTM_PL_VRAM:
nvif_object_unmap_handle(&mem->mem.object);
break;
default:
break;
}
}
}
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nvkm_device *device = nvxx_device(&drm->client.device);
struct nouveau_mem *mem = nouveau_mem(reg);
int ret;
mutex_lock(&drm->ttm.io_reserve_mutex);
retry:
switch (reg->mem_type) {
case TTM_PL_SYSTEM:
/* System memory */
return 0;
ret = 0;
goto out;
case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
@@ -1037,9 +1086,12 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
reg->bus.is_iomem = !drm->agp.cma;
}
#endif
if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
!mem->kind) {
/* untiled */
ret = 0;
break;
}
fallthrough; /* tiled memory */
case TTM_PL_VRAM:
reg->bus.offset = reg->start << PAGE_SHIFT;
@@ -1052,7 +1104,6 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
} args;
u64 handle, length;
u32 argc = 0;
int ret;
switch (mem->mem.object.oclass) {
case NVIF_CLASS_MEM_NV50:
@@ -1078,39 +1129,47 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
&handle, &length);
if (ret != 1) {
if (WARN_ON(ret == 0))
return -EINVAL;
return ret;
ret = -EINVAL;
goto out;
}
reg->bus.base = 0;
reg->bus.offset = handle;
ret = 0;
}
break;
default:
return -EINVAL;
ret = -EINVAL;
}
return 0;
out:
if (ret == -ENOSPC) {
struct nouveau_bo *nvbo;
nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
typeof(*nvbo),
io_reserve_lru);
if (nvbo) {
list_del_init(&nvbo->io_reserve_lru);
drm_vma_node_unmap(&nvbo->bo.base.vma_node,
bdev->dev_mapping);
nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
goto retry;
}
}
mutex_unlock(&drm->ttm.io_reserve_mutex);
return ret;
}
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nouveau_mem *mem = nouveau_mem(reg);
if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
switch (reg->mem_type) {
case TTM_PL_TT:
if (mem->kind)
nvif_object_unmap_handle(&mem->mem.object);
break;
case TTM_PL_VRAM:
nvif_object_unmap_handle(&mem->mem.object);
break;
default:
break;
}
}
mutex_lock(&drm->ttm.io_reserve_mutex);
nouveau_ttm_io_mem_free_locked(drm, reg);
mutex_unlock(&drm->ttm.io_reserve_mutex);
}
static int

drivers/gpu/drm/nouveau/nouveau_bo.h

@@ -18,6 +18,7 @@ struct nouveau_bo {
bool force_coherent;
struct ttm_bo_kmap_obj kmap;
struct list_head head;
struct list_head io_reserve_lru;
/* protected by ttm_bo_reserve() */
struct drm_file *reserved_by;
@@ -96,6 +97,8 @@ int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
bool no_wait_gpu);
void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo);
void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo);
/* TODO: submit equivalent to TTM generic API upstream? */
static inline void __iomem *

drivers/gpu/drm/nouveau/nouveau_drv.h

@@ -164,6 +164,8 @@ struct nouveau_drm {
int type_vram;
int type_host[2];
int type_ncoh[2];
struct mutex io_reserve_mutex;
struct list_head io_reserve_lru;
} ttm;
/* GEM interface support */

drivers/gpu/drm/nouveau/nouveau_ttm.c

@@ -123,13 +123,51 @@ const struct ttm_resource_manager_func nv04_gart_manager = {
.free = nouveau_manager_del,
};
static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = vma->vm_private_data;
pgprot_t prot;
vm_fault_t ret;
ret = ttm_bo_vm_reserve(bo, vmf);
if (ret)
return ret;
nouveau_bo_del_io_reserve_lru(bo);
prot = vm_get_page_prot(vma->vm_flags);
ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
return ret;
nouveau_bo_add_io_reserve_lru(bo);
dma_resv_unlock(bo->base.resv);
return ret;
}
static struct vm_operations_struct nouveau_ttm_vm_ops = {
.fault = nouveau_ttm_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
.access = ttm_bo_vm_access
};
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv = filp->private_data;
struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
int ret;
return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
ret = ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
if (ret)
return ret;
vma->vm_ops = &nouveau_ttm_vm_ops;
return 0;
}
static int
@@ -173,7 +211,6 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm)
}
man->func = &nouveau_vram_manager;
man->use_io_reserve_lru = true;
ttm_resource_manager_init(man,
drm->gem.vram_available >> PAGE_SHIFT);
@@ -339,6 +376,9 @@ nouveau_ttm_init(struct nouveau_drm *drm)
return ret;
}
mutex_init(&drm->ttm.io_reserve_mutex);
INIT_LIST_HEAD(&drm->ttm.io_reserve_lru);
NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
return 0;