drm/ttm: cope with reserved buffers on lru list in ttm_mem_evict_first, v2
Replace the goto loop with a simple for-each loop, and only run the delayed-destroy cleanup if we can reserve the buffer first. No race occurs, since the lru lock is never dropped any more. An empty list and a list full of unreservable buffers both cause -EBUSY to be returned, which is identical to the previous situation, because previously buffers on the lru list were always guaranteed to be reservable.

This should work since currently ttm guarantees items on the lru are always reservable, and reserving items blockingly with some bo held is enough to cause a deadlock. Currently this is not a concern since removal from the lru list and reservation are always done atomically, but when that guarantee no longer holds, we have to handle this situation or end up with possible deadlocks.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 2b7b3ad2fb
commit e7ab20197b
@@ -811,47 +811,27 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_buffer_object *bo;
-	int ret, put_count = 0;
+	int ret = -EBUSY, put_count;
 
-retry:
 	spin_lock(&glob->lru_lock);
-	if (list_empty(&man->lru)) {
-		spin_unlock(&glob->lru_lock);
-		return -EBUSY;
+	list_for_each_entry(bo, &man->lru, lru) {
+		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		if (!ret)
+			break;
 	}
 
-	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
-	kref_get(&bo->list_kref);
-
-	if (!list_empty(&bo->ddestroy)) {
-		ret = ttm_bo_reserve_locked(bo, interruptible, no_wait_reserve, false, 0);
-		if (!ret)
-			ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
-							     no_wait_gpu);
-		else
-			spin_unlock(&glob->lru_lock);
-
-		kref_put(&bo->list_kref, ttm_bo_release_list);
-
+	if (ret) {
+		spin_unlock(&glob->lru_lock);
 		return ret;
 	}
 
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
-	if (unlikely(ret == -EBUSY)) {
-		spin_unlock(&glob->lru_lock);
-		if (likely(!no_wait_reserve))
-			ret = ttm_bo_wait_unreserved(bo, interruptible);
+	kref_get(&bo->list_kref);
 
+	if (!list_empty(&bo->ddestroy)) {
+		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
+						     no_wait_gpu);
 		kref_put(&bo->list_kref, ttm_bo_release_list);
-
-		/**
-		 * We *need* to retry after releasing the lru lock.
-		 */
-
-		if (unlikely(ret != 0))
-			return ret;
-		goto retry;
+		return ret;
 	}
 
 	put_count = ttm_bo_del_from_lru(bo);
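For readers outside the kernel tree, here is a minimal userspace sketch of the pattern the new code adopts: walk the LRU under its lock and try-lock each entry, rather than block-waiting on the head entry and retrying. This is not the TTM code; struct item, evict_first(), and the pthread mutexes standing in for the lru spinlock and the bo reservation are all hypothetical stand-ins.

/* Hedged sketch of the trylock-walk pattern, assuming POSIX threads.
 * Because the walk never sleeps, the "lru_lock" is held across the
 * whole scan, which is what removes the old drop-lock/retry window. */
#include <pthread.h>
#include <stdio.h>

struct item {
	struct item *next;
	pthread_mutex_t reserve;	/* stands in for the bo reservation */
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Return the first reservable item, or NULL.  NULL covers both an
 * empty list and a list full of unreservable items, matching the
 * single -EBUSY return in the patch. */
static struct item *evict_first(struct item *lru_head)
{
	struct item *it, *found = NULL;

	pthread_mutex_lock(&lru_lock);
	for (it = lru_head; it; it = it->next) {
		/* trylock only: never sleep while holding lru_lock,
		 * and never block-reserve with another buffer already
		 * held, which is the ABBA deadlock the commit message
		 * warns about. */
		if (pthread_mutex_trylock(&it->reserve) == 0) {
			found = it;
			break;
		}
	}
	pthread_mutex_unlock(&lru_lock);
	return found;
}

int main(void)
{
	struct item b = { NULL, PTHREAD_MUTEX_INITIALIZER };
	struct item a = { &b,   PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&a.reserve);	/* a is "already reserved" */
	printf("evicted: %s\n", evict_first(&a) == &b ? "b" : "a");
	return 0;
}

Compiled with -lpthread, this prints "evicted: b": the busy buffer a is skipped instead of waited on, just as the new list_for_each_entry() walk skips buffers whose trylock-style ttm_bo_reserve_locked() fails.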