drm/ttm: add BO priorities for the LRUs

This way the driver can specify a priority for a BO, with the effect that
a BO is only evicted once all other BOs with a lower priority have been
evicted first.

Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Roger.He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Author:    Christian König <christian.koenig@amd.com>
Date:      2017-01-10 14:08:28 +0100
Committer: Alex Deucher <alexander.deucher@amd.com>
Commit:    cf6c467d67
Parent:    2ee7fc92cf

4 changed files with 52 additions and 27 deletions
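
To make the new eviction order concrete, below is a minimal, self-contained
userspace sketch of the scan this patch introduces: buffers sit on one LRU
list per priority level, and the evictor walks the levels from 0 upward, so
a buffer on a higher level is only considered once every level below it has
been exhausted. This is an illustration only; struct bo, add_to_lru() and
evict_first() are invented for the example and are not part of the TTM API.

#include <stdio.h>

#define TTM_MAX_BO_PRIORITY 16

/* Toy stand-in for a buffer object; only what the scan needs. */
struct bo {
        const char *name;
        unsigned priority;      /* 0 = evicted first, 15 = evicted last */
        int busy;               /* stands in for a failed reservation */
        struct bo *next;
};

/* One LRU list per priority level, mirroring man->lru[TTM_MAX_BO_PRIORITY]. */
static struct bo *lru[TTM_MAX_BO_PRIORITY];

static void add_to_lru(struct bo *bo)
{
        bo->next = lru[bo->priority];
        lru[bo->priority] = bo;
}

/*
 * Walk the priority levels in ascending order and return the first buffer
 * that can be "reserved".  A priority-N buffer is only reached once every
 * buffer on levels 0..N-1 has been tried.
 */
static struct bo *evict_first(void)
{
        for (unsigned i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
                for (struct bo *bo = lru[i]; bo; bo = bo->next)
                        if (!bo->busy)
                                return bo;
        return NULL;
}

int main(void)
{
        struct bo scanout = { "scanout", 15, 0, NULL };
        struct bo texture = { "texture", 0, 0, NULL };
        struct bo busy_bo = { "busy-bo", 0, 1, NULL };

        add_to_lru(&scanout);
        add_to_lru(&texture);
        add_to_lru(&busy_bo);

        /* Prints "evict: texture": level 0 is drained before level 15,
         * and the busy BO on level 0 is merely skipped. */
        struct bo *victim = evict_first();
        printf("evict: %s\n", victim ? victim->name : "none");
        return 0;
}

The nested loops in ttm_mem_evict_first() below have the same shape, with
one extra detail: the outer loop only advances to the next priority level
when the inner loop failed to reserve anything (ret is still non-zero), so
a successful eviction on a low level always wins.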

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1166,8 +1166,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
                 struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];
 
                 for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
-                        lru->lru[j] = &adev->mman.bdev.man[j].lru;
-                lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
+                        lru->lru[j] = &adev->mman.bdev.man[j].lru[0];
+                lru->swap_lru = &adev->mman.bdev.glob->swap_lru[0];
         }
 
         for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)

--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -242,13 +242,13 @@ EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 
 struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo)
 {
-        return bo->bdev->man[bo->mem.mem_type].lru.prev;
+        return bo->bdev->man[bo->mem.mem_type].lru[bo->priority].prev;
 }
 EXPORT_SYMBOL(ttm_bo_default_lru_tail);
 
 struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo)
 {
-        return bo->glob->swap_lru.prev;
+        return bo->glob->swap_lru[bo->priority].prev;
 }
 EXPORT_SYMBOL(ttm_bo_default_swap_lru_tail);
@@ -741,20 +741,27 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
         struct ttm_buffer_object *bo;
         int ret = -EBUSY, put_count;
+        unsigned i;
 
         spin_lock(&glob->lru_lock);
-        list_for_each_entry(bo, &man->lru, lru) {
-                ret = __ttm_bo_reserve(bo, false, true, NULL);
-                if (ret)
-                        continue;
+        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+                list_for_each_entry(bo, &man->lru[i], lru) {
+                        ret = __ttm_bo_reserve(bo, false, true, NULL);
+                        if (ret)
+                                continue;
 
-                if (place && !bdev->driver->eviction_valuable(bo, place)) {
-                        __ttm_bo_unreserve(bo);
-                        ret = -EBUSY;
-                        continue;
+                        if (place && !bdev->driver->eviction_valuable(bo,
+                                                                      place)) {
+                                __ttm_bo_unreserve(bo);
+                                ret = -EBUSY;
+                                continue;
+                        }
+
+                        break;
                 }
 
-                break;
+                if (!ret)
+                        break;
         }
 
         if (ret) {
@@ -1197,6 +1204,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
         }
         atomic_inc(&bo->glob->bo_count);
         drm_vma_node_reset(&bo->vma_node);
+        bo->priority = 0;
 
         /*
          * For ttm_bo_type_device buffers, allocate
@@ -1297,18 +1305,21 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
         struct ttm_bo_global *glob = bdev->glob;
         struct dma_fence *fence;
         int ret;
+        unsigned i;
 
         /*
          * Can't use standard list traversal since we're unlocking.
         */
 
         spin_lock(&glob->lru_lock);
-        while (!list_empty(&man->lru)) {
-                spin_unlock(&glob->lru_lock);
-                ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
-                if (ret)
-                        return ret;
-                spin_lock(&glob->lru_lock);
+        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+                while (!list_empty(&man->lru[i])) {
+                        spin_unlock(&glob->lru_lock);
+                        ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
+                        if (ret)
+                                return ret;
+                        spin_lock(&glob->lru_lock);
+                }
         }
 
         spin_unlock(&glob->lru_lock);
@@ -1385,6 +1396,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 {
         int ret = -EINVAL;
         struct ttm_mem_type_manager *man;
+        unsigned i;
 
         BUG_ON(type >= TTM_NUM_MEM_TYPES);
         man = &bdev->man[type];
@@ -1410,7 +1422,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
         man->use_type = true;
         man->size = p_size;
 
-        INIT_LIST_HEAD(&man->lru);
+        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+                INIT_LIST_HEAD(&man->lru[i]);
         man->move = NULL;
 
         return 0;
@@ -1442,6 +1455,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
                 container_of(ref, struct ttm_bo_global_ref, ref);
         struct ttm_bo_global *glob = ref->object;
         int ret;
+        unsigned i;
 
         mutex_init(&glob->device_list_mutex);
         spin_lock_init(&glob->lru_lock);
@@ -1453,7 +1467,8 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
                 goto out_no_drp;
         }
 
-        INIT_LIST_HEAD(&glob->swap_lru);
+        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+                INIT_LIST_HEAD(&glob->swap_lru[i]);
         INIT_LIST_HEAD(&glob->device_list);
 
         ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
@@ -1512,8 +1527,9 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
         if (list_empty(&bdev->ddestroy))
                 TTM_DEBUG("Delayed destroy list was clean\n");
 
-        if (list_empty(&bdev->man[0].lru))
-                TTM_DEBUG("Swap list was clean\n");
+        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+                if (list_empty(&bdev->man[0].lru[i]))
+                        TTM_DEBUG("Swap list %d was clean\n", i);
         spin_unlock(&glob->lru_lock);
 
         drm_vma_offset_manager_destroy(&bdev->vma_manager);
@@ -1665,10 +1681,15 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
         int ret = -EBUSY;
         int put_count;
         uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
+        unsigned i;
 
         spin_lock(&glob->lru_lock);
-        list_for_each_entry(bo, &glob->swap_lru, swap) {
-                ret = __ttm_bo_reserve(bo, false, true, NULL);
+        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+                list_for_each_entry(bo, &glob->swap_lru[i], swap) {
+                        ret = __ttm_bo_reserve(bo, false, true, NULL);
+                        if (!ret)
+                                break;
+                }
                 if (!ret)
                         break;
         }

--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -215,6 +215,8 @@ struct ttm_buffer_object {
 
         struct drm_vma_offset_node vma_node;
 
+        unsigned priority;
+
         /**
          * Special members that are protected by the reserve lock
          * and the bo::lock when written to. Can be read with

--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -42,6 +42,8 @@
 #include <linux/spinlock.h>
 #include <linux/reservation.h>
 
+#define TTM_MAX_BO_PRIORITY     16
+
 struct ttm_backend_func {
         /**
          * struct ttm_backend_func member bind
@@ -298,7 +300,7 @@ struct ttm_mem_type_manager {
          * Protected by the global->lru_lock.
          */
 
-        struct list_head lru;
+        struct list_head lru[TTM_MAX_BO_PRIORITY];
 
         /*
          * Protected by @move_lock.
@@ -518,7 +520,7 @@ struct ttm_bo_global {
         /**
          * Protected by the lru_lock.
          */
-        struct list_head swap_lru;
+        struct list_head swap_lru[TTM_MAX_BO_PRIORITY];
 
         /**
          * Internal protection.