drm/ttm: always keep BOs on the LRU
This allows blocking until BOs become available in memory management; amdgpu has been doing this during CS for quite a while now. Now apply the new behavior to all drivers using TTM.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Thomas Hellstrom <thellstrom@vmware.com>
Link: https://patchwork.freedesktop.org/patch/332878/
parent 7fb03cc3e0
commit 9165fb879f
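In short: ttm_eu_reserve_buffers() loses its trailing del_lru parameter, and buffer objects now stay on the LRU while reserved; the reservation itself protects them, and they are simply moved to the LRU tail on unreserve. A minimal caller-side sketch of the API change (variable names are illustrative only, following the call sites changed below):

	/* Before this patch: reserving could also take the BOs off the LRU. */
	ret = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, true);

	/* After this patch: the del_lru argument is gone; BOs remain on the
	 * LRU for the whole time they are reserved. */
	ret = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);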
@@ -586,7 +586,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
 	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
 
 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
-				     false, &ctx->duplicates, true);
+				     false, &ctx->duplicates);
 	if (!ret)
 		ctx->reserved = true;
 	else {
@@ -659,7 +659,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
 	}
 
 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
-				     false, &ctx->duplicates, true);
+				     false, &ctx->duplicates);
 	if (!ret)
 		ctx->reserved = true;
 	else
@@ -1797,8 +1797,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 	}
 
 	/* Reserve all BOs and page tables for validation */
-	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
-				     true);
+	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
 	WARN(!list_empty(&duplicates), "Duplicates should be empty");
 	if (ret)
 		goto out_free;
@@ -1996,7 +1995,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 	}
 
 	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
-				     false, &duplicate_save, true);
+				     false, &duplicate_save);
 	if (ret) {
 		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
 		goto ttm_reserve_fail;
@@ -650,7 +650,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	}
 
 	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
-				   &duplicates, false);
+				   &duplicates);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS)
 			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
@@ -80,7 +80,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	list_add(&csa_tv.head, &list);
 	amdgpu_vm_get_pd_bo(vm, &list, &pd);
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, false);
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 	if (r) {
 		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
 		return r;
@@ -175,7 +175,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 
 	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false);
+	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
 	if (r) {
 		dev_err(adev->dev, "leaking bo va because "
 			"we fail to reserve bo (%d)\n", r);
@@ -613,7 +613,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 
 	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, false);
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r)
 		goto error_unref;
 
@@ -4494,7 +4494,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
 	tv.num_shared = 1;
 	list_add(&tv.head, &list);
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL, true);
+	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
 	if (r) {
 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
 		return r;
@@ -260,7 +260,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
 		return 0;
 
 	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
-				     !no_intr, NULL, true);
+				     !no_intr, NULL);
 	if (ret)
 		return ret;
 
@@ -459,7 +459,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 		bo = entry->bo;
 
 		dma_resv_add_shared_fence(bo->base.resv, &release->base);
-		ttm_bo_add_to_lru(bo);
+		ttm_bo_move_to_lru_tail(bo, NULL);
 		dma_resv_unlock(bo->base.resv);
 	}
 	spin_unlock(&glob->lru_lock);
@@ -566,7 +566,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
 	if (!vm_bos)
 		return;
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 	if (r)
 		goto error_free;
 
@@ -542,7 +542,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
 
 	INIT_LIST_HEAD(&duplicates);
-	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true);
+	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
 	if (unlikely(r != 0)) {
 		return r;
 	}
@@ -192,18 +192,12 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
 	}
 }
 
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
-{
-	ttm_bo_add_mem_to_lru(bo, &bo->mem);
-}
-EXPORT_SYMBOL(ttm_bo_add_to_lru);
-
 static void ttm_bo_ref_bug(struct kref *list_kref)
 {
 	BUG();
 }
 
-void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	bool notify = false;
@@ -223,16 +217,6 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 		bdev->driver->del_from_lru_notify(bo);
 }
 
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_global *glob = bo->bdev->glob;
-
-	spin_lock(&glob->lru_lock);
-	ttm_bo_del_from_lru(bo);
-	spin_unlock(&glob->lru_lock);
-}
-EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
-
 static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
 				     struct ttm_buffer_object *bo)
 {
@@ -247,7 +231,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
 	dma_resv_assert_held(bo->base.resv);
 
 	ttm_bo_del_from_lru(bo);
-	ttm_bo_add_to_lru(bo);
+	ttm_bo_add_mem_to_lru(bo, &bo->mem);
 
 	if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
 		switch (bo->mem.mem_type) {
@@ -511,7 +495,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		 */
 		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
 			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
-			ttm_bo_add_to_lru(bo);
+			ttm_bo_move_to_lru_tail(bo, NULL);
 		}
 
 		dma_resv_unlock(bo->base.resv);
@@ -895,17 +879,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 		return ret;
 	}
 
-	ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
 
 	ret = ttm_bo_evict(bo, ctx);
-	if (locked) {
+	if (locked)
 		ttm_bo_unreserve(bo);
-	} else {
-		spin_lock(&glob->lru_lock);
-		ttm_bo_add_to_lru(bo);
-		spin_unlock(&glob->lru_lock);
-	}
 
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	return ret;
@@ -1067,12 +1045,10 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
 	mem->mem_type = mem_type;
 	mem->placement = cur_flags;
 
-	if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) {
-		spin_lock(&bo->bdev->glob->lru_lock);
-		ttm_bo_del_from_lru(bo);
-		ttm_bo_add_mem_to_lru(bo, mem);
-		spin_unlock(&bo->bdev->glob->lru_lock);
-	}
+	spin_lock(&bo->bdev->glob->lru_lock);
+	ttm_bo_del_from_lru(bo);
+	ttm_bo_add_mem_to_lru(bo, mem);
+	spin_unlock(&bo->bdev->glob->lru_lock);
 
 	return 0;
 }
@@ -1377,11 +1353,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		return ret;
 	}
 
-	if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-		spin_lock(&bdev->glob->lru_lock);
-		ttm_bo_add_to_lru(bo);
-		spin_unlock(&bdev->glob->lru_lock);
-	}
+	spin_lock(&bdev->glob->lru_lock);
+	ttm_bo_move_to_lru_tail(bo, NULL);
+	spin_unlock(&bdev->glob->lru_lock);
 
 	return ret;
 }
@@ -43,16 +43,6 @@ static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
 	}
 }
 
-static void ttm_eu_del_from_lru_locked(struct list_head *list)
-{
-	struct ttm_validate_buffer *entry;
-
-	list_for_each_entry(entry, list, head) {
-		struct ttm_buffer_object *bo = entry->bo;
-		ttm_bo_del_from_lru(bo);
-	}
-}
-
 void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 				struct list_head *list)
 {
@@ -69,8 +59,7 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		if (list_empty(&bo->lru))
-			ttm_bo_add_to_lru(bo);
+		ttm_bo_move_to_lru_tail(bo, NULL);
 		dma_resv_unlock(bo->base.resv);
 	}
 	spin_unlock(&glob->lru_lock);
@@ -94,7 +83,7 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
 int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			   struct list_head *list, bool intr,
-			   struct list_head *dups, bool del_lru)
+			   struct list_head *dups)
 {
 	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
@@ -168,11 +157,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 		list_add(&entry->head, list);
 	}
 
-	if (del_lru) {
-		spin_lock(&glob->lru_lock);
-		ttm_eu_del_from_lru_locked(list);
-		spin_unlock(&glob->lru_lock);
-	}
 	return 0;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -199,10 +183,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 			dma_resv_add_shared_fence(bo->base.resv, fence);
 		else
 			dma_resv_add_excl_fence(bo->base.resv, fence);
-		if (list_empty(&bo->lru))
-			ttm_bo_add_to_lru(bo);
-		else
-			ttm_bo_move_to_lru_tail(bo, NULL);
+		ttm_bo_move_to_lru_tail(bo, NULL);
 		dma_resv_unlock(bo->base.resv);
 	}
 	spin_unlock(&glob->lru_lock);
@@ -492,8 +492,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
 	val_buf->bo = &res->backup->base;
 	val_buf->num_shared = 0;
 	list_add_tail(&val_buf->head, &val_list);
-	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
-				     true);
+	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
 	if (unlikely(ret != 0))
 		goto out_no_reserve;
 
@@ -170,7 +170,7 @@ vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
 			  bool intr)
 {
 	return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
-				      NULL, true);
+				      NULL);
 }
 
 /**
@@ -360,30 +360,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
  */
 void ttm_bo_put(struct ttm_buffer_object *bo);
 
-/**
- * ttm_bo_add_to_lru
- *
- * @bo: The buffer object.
- *
- * Add this bo to the relevant mem type lru and, if it's backed by
- * system pages (ttms) to the swap list.
- * This function must be called with struct ttm_bo_global::lru_lock held, and
- * is typically called immediately prior to unreserving a bo.
- */
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_del_from_lru
- *
- * @bo: The buffer object.
- *
- * Remove this bo from all lru lists used to lookup and reserve an object.
- * This function must be called with struct ttm_bo_global::lru_lock held,
- * and is usually called just immediately after the bo has been reserved to
- * avoid recursive reservation from lru lists.
- */
-void ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
-
 /**
  * ttm_bo_move_to_lru_tail
  *
@@ -631,9 +631,6 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
 int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
 void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
 
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
-
 /**
  * __ttm_bo_reserve:
  *
@@ -727,15 +724,9 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
 				 bool interruptible, bool no_wait,
 				 struct ww_acquire_ctx *ticket)
 {
-	int ret;
-
 	WARN_ON(!kref_read(&bo->kref));
 
-	ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
-	if (likely(ret == 0))
-		ttm_bo_del_sub_from_lru(bo);
-
-	return ret;
+	return __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
 }
 
 /**
@@ -762,9 +753,7 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 	else
 		dma_resv_lock_slow(bo->base.resv, ticket);
 
-	if (likely(ret == 0))
-		ttm_bo_del_sub_from_lru(bo);
-	else if (ret == -EINTR)
+	if (ret == -EINTR)
 		ret = -ERESTARTSYS;
 
 	return ret;
@@ -780,10 +769,7 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
 	spin_lock(&bo->bdev->glob->lru_lock);
-	if (list_empty(&bo->lru))
-		ttm_bo_add_to_lru(bo);
-	else
-		ttm_bo_move_to_lru_tail(bo, NULL);
+	ttm_bo_move_to_lru_tail(bo, NULL);
 	spin_unlock(&bo->bdev->glob->lru_lock);
 	dma_resv_unlock(bo->base.resv);
 }
@@ -99,7 +99,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 
 extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 				  struct list_head *list, bool intr,
-				  struct list_head *dups, bool del_lru);
+				  struct list_head *dups);
 
 /**
  * function ttm_eu_fence_buffer_objects.