drm/radeon/kms: fix indirect buffer management V2

There are 3 distinct states for an indirect buffer (IB):
  1- free with no fence
  2- free with a fence
  3- not free (fence doesn't matter)
Previous code mixed cases 2 & 3 into a single one, leading to possible
catastrophic failure. This patch reworks the handling and properly
separates each case. So when you get an IB we mark it as not free and
its fence status doesn't matter. The fence becomes active (i.e. has a
meaning for the IB code) once the IB is scheduled or freed. This patch
also gets rid of the alloc bitmap as it was overkill; we now go through
the IB pool like a ring buffer, as the oldest IB is the first one that
will be freed.

Fixes:
https://bugs.freedesktop.org/show_bug.cgi?id=26438
and likely other bugs.

V2: remove the scheduled list (it is useless now) and fix free IB scanning

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Jerome Glisse 2010-02-15 21:36:13 +01:00 committed by Dave Airlie
parent 2e98f10a7a
commit e821767beb
3 changed files with 45 additions and 72 deletions
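
The key change is in radeon_ib_get(): instead of a bitmap lookup, the
pool is now walked like a ring buffer, starting at head_id, and the
first IB marked free is handed out. RADEON_IB_POOL_SIZE must be a power
of 2 so the index can wrap with a simple mask. Below is a minimal
standalone sketch of that walk, with simplified, hypothetical names and
the driver's locking and fence handling left out:

	#include <stdbool.h>
	#include <stddef.h>

	#define IB_POOL_SIZE 16	/* must be a power of 2 so the index wraps with a mask */

	struct ib {
		unsigned idx;
		bool free;	/* set when freed or scheduled, cleared when handed out */
	};

	struct ib_pool {
		struct ib ibs[IB_POOL_SIZE];
		unsigned head_id;	/* oldest slot, first candidate for reuse */
	};

	static void ib_pool_init(struct ib_pool *pool)
	{
		/* every IB starts out free, scanning starts at slot 0 */
		for (unsigned i = 0; i < IB_POOL_SIZE; i++) {
			pool->ibs[i].idx = i;
			pool->ibs[i].free = true;
		}
		pool->head_id = 0;
	}

	static struct ib *ib_pool_get(struct ib_pool *pool)
	{
		struct ib *nib = NULL;
		unsigned i, c;

		/* scan at most IB_POOL_SIZE slots, starting at the oldest one */
		for (i = pool->head_id, c = 0; c < IB_POOL_SIZE; c++, i++) {
			i &= (IB_POOL_SIZE - 1);	/* wrap around the pool */
			if (pool->ibs[i].free) {
				nib = &pool->ibs[i];
				break;
			}
		}
		if (nib == NULL)
			return NULL;	/* every IB is in flight; the driver returns -EBUSY here */
		pool->head_id = (nib->idx + 1) & (IB_POOL_SIZE - 1);
		nib->free = false;
		return nib;
	}

In the driver the IB handed back may still carry the fence from its
last use (the "free with a fence" state above); radeon_ib_get() waits
on that fence before reusing the buffer, and radeon_ib_schedule() /
radeon_ib_free() simply mark the IB free again.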

drivers/gpu/drm/radeon/r600_blit_kms.c

@@ -541,9 +541,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
 void r600_vb_ib_put(struct radeon_device *rdev)
 {
 	radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
-	mutex_lock(&rdev->ib_pool.mutex);
-	list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
-	mutex_unlock(&rdev->ib_pool.mutex);
 	radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 }

drivers/gpu/drm/radeon/radeon.h

@@ -97,6 +97,7 @@ extern int radeon_audio;
  * symbol;
  */
 #define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
+/* RADEON_IB_POOL_SIZE must be a power of 2 */
 #define RADEON_IB_POOL_SIZE		16
 #define RADEON_DEBUGFS_MAX_NUM_FILES	32
 #define RADEONFB_CONN_LIMIT		4
@@ -371,11 +372,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
  */
 struct radeon_ib {
 	struct list_head	list;
-	unsigned long		idx;
+	unsigned		idx;
 	uint64_t		gpu_addr;
 	struct radeon_fence	*fence;
 	uint32_t		*ptr;
 	uint32_t		length_dw;
+	bool			free;
 };
 
 /*
@@ -385,11 +387,10 @@ struct radeon_ib {
 struct radeon_ib_pool {
 	struct mutex		mutex;
 	struct radeon_bo	*robj;
-	struct list_head	scheduled_ibs;
 	struct list_head	bogus_ib;
 	struct radeon_ib	ibs[RADEON_IB_POOL_SIZE];
 	bool			ready;
-	DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
+	unsigned		head_id;
 };
 
 struct radeon_cp {

drivers/gpu/drm/radeon/radeon_ring.c

@@ -71,68 +71,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
 {
 	struct radeon_fence *fence;
 	struct radeon_ib *nib;
-	unsigned long i;
-	int r = 0;
+	int r = 0, i, c;
 
 	*ib = NULL;
 	r = radeon_fence_create(rdev, &fence);
 	if (r) {
-		DRM_ERROR("failed to create fence for new IB\n");
+		dev_err(rdev->dev, "failed to create fence for new IB\n");
 		return r;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
-	if (i < RADEON_IB_POOL_SIZE) {
-		set_bit(i, rdev->ib_pool.alloc_bm);
-		rdev->ib_pool.ibs[i].length_dw = 0;
-		*ib = &rdev->ib_pool.ibs[i];
-		mutex_unlock(&rdev->ib_pool.mutex);
-		goto out;
-	}
-	if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
-		/* we go do nothings here */
-		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("all IB allocated none scheduled.\n");
-		r = -EINVAL;
-		goto out;
-	}
-	/* get the first ib on the scheduled list */
-	nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
-			 struct radeon_ib, list);
-	if (nib->fence == NULL) {
-		/* we go do nothings here */
-		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
-		r = -EINVAL;
-		goto out;
-	}
-	mutex_unlock(&rdev->ib_pool.mutex);
-	r = radeon_fence_wait(nib->fence, false);
-	if (r) {
-		DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
-			  (unsigned long)nib->gpu_addr, nib->length_dw);
-		DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
-		goto out;
-	}
+	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
+		i &= (RADEON_IB_POOL_SIZE - 1);
+		if (rdev->ib_pool.ibs[i].free) {
+			nib = &rdev->ib_pool.ibs[i];
+			break;
+		}
+	}
+	if (nib == NULL) {
+		/* This should never happen, it means we allocated all
+		 * IB and haven't scheduled one yet, return EBUSY to
+		 * userspace hoping that on ioctl recall we get better
+		 * luck
+		 */
+		dev_err(rdev->dev, "no free indirect buffer !\n");
+		mutex_unlock(&rdev->ib_pool.mutex);
+		radeon_fence_unref(&fence);
+		return -EBUSY;
+	}
+	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+	nib->free = false;
+	if (nib->fence) {
+		mutex_unlock(&rdev->ib_pool.mutex);
+		r = radeon_fence_wait(nib->fence, false);
+		if (r) {
+			dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
+				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+			mutex_lock(&rdev->ib_pool.mutex);
+			nib->free = true;
+			mutex_unlock(&rdev->ib_pool.mutex);
+			radeon_fence_unref(&fence);
+			return r;
+		}
+		mutex_lock(&rdev->ib_pool.mutex);
+	}
 	radeon_fence_unref(&nib->fence);
-
+	nib->fence = fence;
 	nib->length_dw = 0;
-
-	/* scheduled list is accessed here */
-	mutex_lock(&rdev->ib_pool.mutex);
-	list_del(&nib->list);
-	INIT_LIST_HEAD(&nib->list);
 	mutex_unlock(&rdev->ib_pool.mutex);
-
 	*ib = nib;
-out:
-	if (r) {
-		radeon_fence_unref(&fence);
-	} else {
-		(*ib)->fence = fence;
-	}
-	return r;
+	return 0;
 }
 
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -144,18 +131,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
-		/* IB is scheduled & not signaled don't do anythings */
-		mutex_unlock(&rdev->ib_pool.mutex);
-		return;
-	}
-	list_del(&tmp->list);
-	INIT_LIST_HEAD(&tmp->list);
-	if (tmp->fence)
-		radeon_fence_unref(&tmp->fence);
-	tmp->length_dw = 0;
-	clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+	tmp->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 }
@@ -165,7 +141,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 	if (!ib->length_dw || !rdev->cp.ready) {
 		/* TODO: Nothings in the ib we should report. */
-		DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
 		return -EINVAL;
 	}
@@ -178,7 +154,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 	radeon_ring_ib_execute(rdev, ib);
 	radeon_fence_emit(rdev, ib->fence);
 	mutex_lock(&rdev->ib_pool.mutex);
-	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+	/* once scheduled IB is considered free and protected by the fence */
+	ib->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 	radeon_ring_unlock_commit(rdev);
 	return 0;
@@ -195,7 +172,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 		return 0;
 	INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
 	/* Allocate 1M object buffer */
-	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
 	r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
 			     true, RADEON_GEM_DOMAIN_GTT,
 			     &rdev->ib_pool.robj);
@@ -226,9 +202,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 		rdev->ib_pool.ibs[i].ptr = ptr + offset;
 		rdev->ib_pool.ibs[i].idx = i;
 		rdev->ib_pool.ibs[i].length_dw = 0;
-		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+		rdev->ib_pool.ibs[i].free = true;
 	}
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+	rdev->ib_pool.head_id = 0;
 	rdev->ib_pool.ready = true;
 	DRM_INFO("radeon: ib pool ready.\n");
 	if (radeon_debugfs_ib_init(rdev)) {
@@ -246,7 +222,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
 	radeon_ib_bogus_cleanup(rdev);
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
 	if (rdev->ib_pool.robj) {
 		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
 		if (likely(r == 0)) {
@@ -395,7 +370,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
 	if (ib == NULL) {
 		return 0;
 	}
-	seq_printf(m, "IB %04lu\n", ib->idx);
+	seq_printf(m, "IB %04u\n", ib->idx);
 	seq_printf(m, "IB fence %p\n", ib->fence);
 	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
 	for (i = 0; i < ib->length_dw; i++) {