dma-buf: fix busy wait for new shared fences

When reservation_object_add_shared_fence() is replacing an old fence with a new
one, we should not drop the old one before the new one is in place.

Otherwise other cores can busy wait for the new one to appear.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/322030/
Christian König 2019-08-05 11:14:27 +02:00
parent 178e5f3a5b
commit 93505ee7d0
1 changed file with 7 additions and 7 deletions

@@ -196,6 +196,7 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 					 struct dma_fence *fence)
 {
 	struct reservation_object_list *fobj;
+	struct dma_fence *old;
 	unsigned int i, count;
 
 	dma_fence_get(fence);
@@ -209,18 +210,16 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 	write_seqcount_begin(&obj->seq);
 
 	for (i = 0; i < count; ++i) {
-		struct dma_fence *old_fence;
 
-		old_fence = rcu_dereference_protected(fobj->shared[i],
-						      reservation_object_held(obj));
-		if (old_fence->context == fence->context ||
-		    dma_fence_is_signaled(old_fence)) {
-			dma_fence_put(old_fence);
+		old = rcu_dereference_protected(fobj->shared[i],
+						reservation_object_held(obj));
+		if (old->context == fence->context ||
+		    dma_fence_is_signaled(old))
 			goto replace;
-		}
 	}
 
 	BUG_ON(fobj->shared_count >= fobj->shared_max);
+	old = NULL;
 	count++;
 
 replace:
@@ -230,6 +229,7 @@ replace:
 	write_seqcount_end(&obj->seq);
 	preempt_enable();
+	dma_fence_put(old);
 }
 EXPORT_SYMBOL(reservation_object_add_shared_fence);
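
For reference, a minimal sketch of the lockless reader pattern that runs into the busy wait described above. The helper name peek_shared_fence_rcu and its exact shape are illustrative only and not part of this patch or the reservation_object API; it just shows why a reader spins when a shared slot still points at a fence whose last reference was already dropped.

#include <linux/rcupdate.h>
#include <linux/dma-fence.h>
#include <linux/reservation.h>

/*
 * Illustrative only: an RCU reader grabs a shared fence slot without
 * holding the reservation lock.  dma_fence_get_rcu() fails once the
 * fence's refcount has already dropped to zero, so the reader must retry
 * until it sees a fence it can still take a reference on.
 *
 * Before this fix the writer called dma_fence_put() on the old fence
 * before RCU_INIT_POINTER() published the replacement, so a reader racing
 * with the update could spin in this loop until the new pointer became
 * visible.  Dropping the old fence only after the update closes that
 * window.
 */
static struct dma_fence *
peek_shared_fence_rcu(struct reservation_object *obj, unsigned int i)
{
	struct reservation_object_list *fobj;
	struct dma_fence *fence;

	rcu_read_lock();
	do {
		fobj = rcu_dereference(obj->fence);
		fence = rcu_dereference(fobj->shared[i]);
	} while (fence && !dma_fence_get_rcu(fence));
	rcu_read_unlock();

	return fence;
}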