drm/ttm: cleanup coding style and implementation.

The only functional change is to always keep io_reserved_count up to date
for debugging, even when it is not otherwise used; a standalone sketch of
this counting pattern follows the diff below.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/378242/
Author: Christian König <christian.koenig@amd.com>
Date:   2020-07-15 13:22:56 +02:00
commit  c1c440d41a
parent  ce74773305
1 changed file with 48 additions and 49 deletions

drivers/gpu/drm/ttm/ttm_bo_util.c

@@ -115,39 +115,35 @@ static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
 {
 	struct ttm_buffer_object *bo;
 
-	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
+	bo = list_first_entry_or_null(&man->io_reserve_lru,
+				      struct ttm_buffer_object,
+				      io_reserve_lru);
+	if (!bo)
 		return -ENOSPC;
 
-	bo = list_first_entry(&man->io_reserve_lru,
-			      struct ttm_buffer_object,
-			      io_reserve_lru);
-
 	list_del_init(&bo->io_reserve_lru);
 	ttm_bo_unmap_virtual_locked(bo);
-
 	return 0;
 }
 
 int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
 		       struct ttm_mem_reg *mem)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-	int ret = 0;
+	int ret;
+
+	if (mem->bus.io_reserved_count++)
+		return 0;
 
 	if (!bdev->driver->io_mem_reserve)
 		return 0;
-	if (likely(!man->use_io_reserve_lru))
-		return bdev->driver->io_mem_reserve(bdev, mem);
 
-	if (bdev->driver->io_mem_reserve &&
-	    mem->bus.io_reserved_count++ == 0) {
 retry:
-		ret = bdev->driver->io_mem_reserve(bdev, mem);
-		if (ret == -ENOSPC) {
-			ret = ttm_mem_io_evict(man);
-			if (ret == 0)
-				goto retry;
-		}
+	ret = bdev->driver->io_mem_reserve(bdev, mem);
+	if (ret == -ENOSPC) {
+		ret = ttm_mem_io_evict(man);
+		if (ret == 0)
+			goto retry;
 	}
 	return ret;
 }
@@ -155,35 +151,31 @@ retry:
 
 void ttm_mem_io_free(struct ttm_bo_device *bdev,
 		     struct ttm_mem_reg *mem)
 {
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
-	if (likely(!man->use_io_reserve_lru))
+	if (--mem->bus.io_reserved_count)
 		return;
 
-	if (bdev->driver->io_mem_reserve &&
-	    --mem->bus.io_reserved_count == 0 &&
-	    bdev->driver->io_mem_free)
-		bdev->driver->io_mem_free(bdev, mem);
+	if (!bdev->driver->io_mem_free)
+		return;
 
+	bdev->driver->io_mem_free(bdev, mem);
 }
 
 int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
 {
+	struct ttm_mem_type_manager *man = &bo->bdev->man[bo->mem.mem_type];
 	struct ttm_mem_reg *mem = &bo->mem;
 	int ret;
 
-	if (!mem->bus.io_reserved_vm) {
-		struct ttm_mem_type_manager *man =
-			&bo->bdev->man[mem->mem_type];
+	if (mem->bus.io_reserved_vm)
+		return 0;
 
-		ret = ttm_mem_io_reserve(bo->bdev, mem);
-		if (unlikely(ret != 0))
-			return ret;
-		mem->bus.io_reserved_vm = true;
-		if (man->use_io_reserve_lru)
-			list_add_tail(&bo->io_reserve_lru,
-				      &man->io_reserve_lru);
-	}
+	ret = ttm_mem_io_reserve(bo->bdev, mem);
+	if (unlikely(ret != 0))
+		return ret;
+	mem->bus.io_reserved_vm = true;
+	if (man->use_io_reserve_lru)
+		list_add_tail(&bo->io_reserve_lru,
+			      &man->io_reserve_lru);
 	return 0;
 }
@@ -191,15 +183,17 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
 {
 	struct ttm_mem_reg *mem = &bo->mem;
 
-	if (mem->bus.io_reserved_vm) {
-		mem->bus.io_reserved_vm = false;
-		list_del_init(&bo->io_reserve_lru);
-		ttm_mem_io_free(bo->bdev, mem);
-	}
+	if (!mem->bus.io_reserved_vm)
+		return;
+
+	mem->bus.io_reserved_vm = false;
+	list_del_init(&bo->io_reserve_lru);
+	ttm_mem_io_free(bo->bdev, mem);
 }
 
-static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
-			void **virtual)
+static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev,
+			       struct ttm_mem_reg *mem,
+			       void **virtual)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	int ret;
@@ -216,9 +210,11 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *m
 		addr = mem->bus.addr;
 	} else {
 		if (mem->placement & TTM_PL_FLAG_WC)
-			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
+			addr = ioremap_wc(mem->bus.base + mem->bus.offset,
+					  mem->bus.size);
 		else
-			addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
+			addr = ioremap(mem->bus.base + mem->bus.offset,
+				       mem->bus.size);
 		if (!addr) {
 			(void) ttm_mem_io_lock(man, false);
 			ttm_mem_io_free(bdev, mem);
@@ -230,8 +226,9 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *m
 	return 0;
 }
 
-static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
-			 void *virtual)
+static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev,
+				struct ttm_mem_reg *mem,
+				void *virtual)
 {
 	struct ttm_mem_type_manager *man;
 
@@ -513,11 +510,13 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
 	} else {
 		map->bo_kmap_type = ttm_bo_map_iomap;
 		if (mem->placement & TTM_PL_FLAG_WC)
-			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+			map->virtual = ioremap_wc(bo->mem.bus.base +
+						  bo->mem.bus.offset + offset,
 						  size);
 		else
-			map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset,
-					       size);
+			map->virtual = ioremap(bo->mem.bus.base +
+					       bo->mem.bus.offset + offset,
+					       size);
 	}
 	return (!map->virtual) ? -ENOMEM : 0;
 }
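
The commit message above notes that the only functional change is keeping io_reserved_count up to date unconditionally: after this patch the count is always incremented and decremented, and the driver's io_mem_reserve()/io_mem_free() hooks fire only on the first reservation and the last free. The following is a minimal, self-contained userspace sketch of that reference-counting pattern, not TTM code; struct io_region, region_reserve(), region_free() and the dummy hooks are hypothetical names used only for illustration.

#include <stdio.h>

/* Hypothetical stand-in for a region with driver reserve/free hooks. */
struct io_region {
	unsigned int io_reserved_count;	/* always kept up to date */
	int  (*io_mem_reserve)(struct io_region *region);
	void (*io_mem_free)(struct io_region *region);
};

static int region_reserve(struct io_region *region)
{
	/* Count every reservation, even when no hook is installed. */
	if (region->io_reserved_count++)
		return 0;	/* already reserved, just counted */

	if (!region->io_mem_reserve)
		return 0;

	return region->io_mem_reserve(region);	/* 0 -> 1 transition */
}

static void region_free(struct io_region *region)
{
	/* Drop one reference; only the last free releases the mapping. */
	if (--region->io_reserved_count)
		return;

	if (!region->io_mem_free)
		return;

	region->io_mem_free(region);	/* 1 -> 0 transition */
}

static int dummy_reserve(struct io_region *region)
{
	(void)region;
	printf("driver io_mem_reserve called\n");
	return 0;
}

static void dummy_free(struct io_region *region)
{
	(void)region;
	printf("driver io_mem_free called\n");
}

int main(void)
{
	struct io_region region = {
		.io_mem_reserve = dummy_reserve,
		.io_mem_free = dummy_free,
	};

	region_reserve(&region);	/* hook fires: count 0 -> 1 */
	region_reserve(&region);	/* counted only: 1 -> 2 */
	region_free(&region);		/* counted only: 2 -> 1 */
	region_free(&region);		/* hook fires: 1 -> 0 */

	printf("final io_reserved_count = %u\n", region.io_reserved_count);
	return 0;
}

Both driver hooks run exactly once even though reserve and free are each called twice, while the counter still records every caller, which is the debugging property the commit message refers to.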