drm/i915: Introduce i915_timeline.mutex

A simple mutex used for guarding the flow of requests in and out of the
timeline. In the short-term, it will be used only to guard the addition
of requests into the timeline, taken on alloc and released on commit so
that only one caller can construct a request into the timeline
(important as the seqno and ring pointers must be serialised). This will
be used by observers to ensure that the seqno/hwsp is stable. Later,
when we have reduced retiring to only operate on a single timeline at a
time, we can then use the mutex as the sole guard required for retiring.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190301110547.14758-2-chris@chris-wilson.co.uk
This commit is contained in:
parent
b5773a3616
commit
3ef7114982
|
@@ -563,6 +563,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
|
|||
return ERR_CAST(ce);
|
||||
|
||||
reserve_gt(i915);
|
||||
mutex_lock(&ce->ring->timeline->mutex);
|
||||
|
||||
/* Move our oldest request to the slab-cache (if not in use!) */
|
||||
rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
|
||||
|
@@ -688,6 +689,7 @@ err_unwind:
|
|||
|
||||
kmem_cache_free(global.slab_requests, rq);
|
||||
err_unreserve:
|
||||
mutex_unlock(&ce->ring->timeline->mutex);
|
||||
unreserve_gt(i915);
|
||||
intel_context_unpin(ce);
|
||||
return ERR_PTR(ret);
|
||||
|
@@ -880,7 +882,7 @@ void i915_request_add(struct i915_request *request)
|
|||
GEM_TRACE("%s fence %llx:%lld\n",
|
||||
engine->name, request->fence.context, request->fence.seqno);
|
||||
|
||||
lockdep_assert_held(&request->i915->drm.struct_mutex);
|
||||
lockdep_assert_held(&request->timeline->mutex);
|
||||
trace_i915_request_add(request);
|
||||
|
||||
/*
|
||||
|
@@ -991,6 +993,8 @@ void i915_request_add(struct i915_request *request)
|
|||
*/
|
||||
if (prev && i915_request_completed(prev))
|
||||
i915_request_retire_upto(prev);
|
||||
|
||||
mutex_unlock(&request->timeline->mutex);
|
||||
}
|
||||
|
||||
static unsigned long local_clock_us(unsigned int *cpu)
|
||||
|
|
|
@@ -162,6 +162,7 @@ int i915_timeline_init(struct drm_i915_private *i915,
|
|||
timeline->fence_context = dma_fence_context_alloc(1);
|
||||
|
||||
spin_lock_init(&timeline->lock);
|
||||
mutex_init(&timeline->mutex);
|
||||
|
||||
INIT_ACTIVE_REQUEST(&timeline->barrier);
|
||||
INIT_ACTIVE_REQUEST(&timeline->last_request);
|
||||
|
|
|
@@ -44,6 +44,8 @@ struct i915_timeline {
|
|||
#define TIMELINE_CLIENT 0 /* default subclass */
|
||||
#define TIMELINE_ENGINE 1
|
||||
|
||||
struct mutex mutex; /* protects the flow of requests */
|
||||
|
||||
unsigned int pin_count;
|
||||
const u32 *hwsp_seqno;
|
||||
struct i915_vma *hwsp_ggtt;
|
||||
|
|
|
@@ -141,14 +141,12 @@ static int igt_fence_wait(void *arg)
|
|||
err = -ENOMEM;
|
||||
goto out_locked;
|
||||
}
|
||||
mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */
|
||||
|
||||
if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
|
||||
pr_err("fence wait success before submit (expected timeout)!\n");
|
||||
goto out_device;
|
||||
goto out_locked;
|
||||
}
|
||||
|
||||
mutex_lock(&i915->drm.struct_mutex);
|
||||
i915_request_add(request);
|
||||
mutex_unlock(&i915->drm.struct_mutex);
|
||||
|
||||
|
|
|
@@ -14,6 +14,7 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
|
|||
timeline->fence_context = context;
|
||||
|
||||
spin_lock_init(&timeline->lock);
|
||||
mutex_init(&timeline->mutex);
|
||||
|
||||
INIT_ACTIVE_REQUEST(&timeline->barrier);
|
||||
INIT_ACTIVE_REQUEST(&timeline->last_request);
|
||||
|
|
Loading…
Reference in New Issue