drm/i915: Move request->ctx aside
In the next patch, we want to store the intel_context pointer inside i915_request, as it is frequently accessed via a convoluted dance when submitting the request to hw. Having two context pointers inside i915_request leads to confusion, so first rename the existing i915_gem_context pointer to i915_request.gem_context.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180517212633.24934-1-chris@chris-wilson.co.uk
parent c8af5274c3
commit 4e0d64dba8
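For orientation before the diff, here is a minimal sketch of what the rename amounts to at the struct and at a typical call site. The simplified struct layout and the request_gem_context() helper are illustrative assumptions for this note only, not code from the patch or from the i915 driver:

/*
 * Illustrative sketch only: heavily simplified from the real i915_request
 * definition; only the member touched by this patch is shown, and the
 * helper below is hypothetical.
 */
struct i915_gem_context;

struct i915_request {
	/*
	 * Formerly named "ctx"; renamed so that a later patch can add a
	 * separate intel_context pointer without leaving two ambiguously
	 * named "context" members in the request.
	 */
	struct i915_gem_context *gem_context;
	/* ...other members elided... */
};

/* Call sites change mechanically from rq->ctx to rq->gem_context: */
static inline struct i915_gem_context *
request_gem_context(const struct i915_request *rq)
{
	return rq->gem_context;
}

The diff below applies this rename mechanically across every user of the old field.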
@@ -205,7 +205,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 
 static inline bool is_gvt_request(struct i915_request *req)
 {
-        return i915_gem_context_force_single_submission(req->ctx);
+        return i915_gem_context_force_single_submission(req->gem_context);
 }
 
 static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -305,7 +305,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
         struct i915_request *req = workload->req;
 
         if (IS_KABYLAKE(req->i915) &&
-            is_inhibit_context(req->ctx, req->engine->id))
+            is_inhibit_context(req->gem_context, req->engine->id))
                 intel_vgpu_restore_inhibit_context(vgpu, req);
 
         /* allocate shadow ring buffer */
@@ -542,8 +542,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                                                    struct i915_request,
                                                    client_link);
                 rcu_read_lock();
-                task = pid_task(request && request->ctx->pid ?
-                                request->ctx->pid : file->pid,
+                task = pid_task(request && request->gem_context->pid ?
+                                request->gem_context->pid : file->pid,
                                 PIDTYPE_PID);
                 print_file_stats(m, task ? task->comm : "<unknown>", stats);
                 rcu_read_unlock();
@@ -3067,7 +3067,7 @@ static void skip_request(struct i915_request *request)
 static void engine_skip_context(struct i915_request *request)
 {
         struct intel_engine_cs *engine = request->engine;
-        struct i915_gem_context *hung_ctx = request->ctx;
+        struct i915_gem_context *hung_ctx = request->gem_context;
         struct i915_timeline *timeline = request->timeline;
         unsigned long flags;
 
@@ -3077,7 +3077,7 @@ static void engine_skip_context(struct i915_request *request)
         spin_lock_nested(&timeline->lock, SINGLE_DEPTH_NESTING);
 
         list_for_each_entry_continue(request, &engine->timeline.requests, link)
-                if (request->ctx == hung_ctx)
+                if (request->gem_context == hung_ctx)
                         skip_request(request);
 
         list_for_each_entry(request, &timeline->requests, link)
@@ -3123,11 +3123,11 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
         }
 
         if (stalled) {
-                i915_gem_context_mark_guilty(request->ctx);
+                i915_gem_context_mark_guilty(request->gem_context);
                 skip_request(request);
 
                 /* If this context is now banned, skip all pending requests. */
-                if (i915_gem_context_is_banned(request->ctx))
+                if (i915_gem_context_is_banned(request->gem_context))
                         engine_skip_context(request);
         } else {
                 /*
@@ -3137,7 +3137,7 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
                  */
                 request = i915_gem_find_active_request(engine);
                 if (request) {
-                        i915_gem_context_mark_innocent(request->ctx);
+                        i915_gem_context_mark_innocent(request->gem_context);
                         dma_fence_set_error(&request->fence, -EAGAIN);
 
                         /* Rewind the engine to replay the incomplete rq */
@@ -1287,9 +1287,11 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 static void record_request(struct i915_request *request,
                            struct drm_i915_error_request *erq)
 {
-        erq->context = request->ctx->hw_id;
+        struct i915_gem_context *ctx = request->gem_context;
+
+        erq->context = ctx->hw_id;
         erq->sched_attr = request->sched.attr;
-        erq->ban_score = atomic_read(&request->ctx->ban_score);
+        erq->ban_score = atomic_read(&ctx->ban_score);
         erq->seqno = request->global_seqno;
         erq->jiffies = request->emitted_jiffies;
         erq->start = i915_ggtt_offset(request->ring->vma);
@@ -1297,7 +1299,7 @@ static void record_request(struct i915_request *request,
         erq->tail = request->tail;
 
         rcu_read_lock();
-        erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+        erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
         rcu_read_unlock();
 }
 
@@ -1461,12 +1463,12 @@ static void gem_record_rings(struct i915_gpu_state *error)
 
                 request = i915_gem_find_active_request(engine);
                 if (request) {
+                        struct i915_gem_context *ctx = request->gem_context;
                         struct intel_ring *ring;
 
-                        ee->vm = request->ctx->ppgtt ?
-                                &request->ctx->ppgtt->base : &ggtt->base;
+                        ee->vm = ctx->ppgtt ? &ctx->ppgtt->base : &ggtt->base;
 
-                        record_context(&ee->context, request->ctx);
+                        record_context(&ee->context, ctx);
 
                         /* We need to copy these to an anonymous buffer
                          * as the simplest method to avoid being overwritten
@@ -1483,11 +1485,11 @@ static void gem_record_rings(struct i915_gpu_state *error)
 
                         ee->ctx =
                                 i915_error_object_create(i915,
-                                                         to_intel_context(request->ctx,
+                                                         to_intel_context(ctx,
                                                                           engine)->state);
 
                         error->simulated |=
-                                i915_gem_context_no_error_capture(request->ctx);
+                                i915_gem_context_no_error_capture(ctx);
 
                         ee->rq_head = request->head;
                         ee->rq_post = request->postfix;
@@ -384,7 +384,7 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
          */
         if (engine->last_retired_context)
                 intel_context_unpin(engine->last_retired_context, engine);
-        engine->last_retired_context = rq->ctx;
+        engine->last_retired_context = rq->gem_context;
 }
 
 static void __retire_engine_upto(struct intel_engine_cs *engine,
@@ -455,8 +455,8 @@ static void i915_request_retire(struct i915_request *request)
         i915_request_remove_from_client(request);
 
         /* Retirement decays the ban score as it is a sign of ctx progress */
-        atomic_dec_if_positive(&request->ctx->ban_score);
-        intel_context_unpin(request->ctx, request->engine);
+        atomic_dec_if_positive(&request->gem_context->ban_score);
+        intel_context_unpin(request->gem_context, request->engine);
 
         __retire_engine_upto(request->engine, request);
 
@@ -760,7 +760,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
         INIT_LIST_HEAD(&rq->active_list);
         rq->i915 = i915;
         rq->engine = engine;
-        rq->ctx = ctx;
+        rq->gem_context = ctx;
         rq->ring = ring;
         rq->timeline = ring->timeline;
         GEM_BUG_ON(rq->timeline == &engine->timeline);
@@ -814,7 +814,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
                 goto err_unwind;
 
         /* Keep a second pin for the dual retirement along engine and ring */
-        __intel_context_pin(rq->ctx, engine);
+        __intel_context_pin(rq->gem_context, engine);
 
         /* Check that we didn't interrupt ourselves with a new request */
         GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
@@ -1113,7 +1113,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
         local_bh_disable();
         rcu_read_lock(); /* RCU serialisation for set-wedged protection */
         if (engine->schedule)
-                engine->schedule(request, &request->ctx->sched);
+                engine->schedule(request, &request->gem_context->sched);
         rcu_read_unlock();
         i915_sw_fence_commit(&request->submit);
         local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
@@ -93,7 +93,7 @@ struct i915_request {
          * i915_request_free() will then decrement the refcount on the
          * context.
          */
-        struct i915_gem_context *ctx;
+        struct i915_gem_context *gem_context;
         struct intel_engine_cs *engine;
         struct intel_ring *ring;
         struct i915_timeline *timeline;
@@ -624,7 +624,7 @@ TRACE_EVENT(i915_request_queue,
 
             TP_fast_assign(
                            __entry->dev = rq->i915->drm.primary->index;
-                           __entry->hw_id = rq->ctx->hw_id;
+                           __entry->hw_id = rq->gem_context->hw_id;
                            __entry->ring = rq->engine->id;
                            __entry->ctx = rq->fence.context;
                            __entry->seqno = rq->fence.seqno;
@@ -651,7 +651,7 @@ DECLARE_EVENT_CLASS(i915_request,
 
             TP_fast_assign(
                            __entry->dev = rq->i915->drm.primary->index;
-                           __entry->hw_id = rq->ctx->hw_id;
+                           __entry->hw_id = rq->gem_context->hw_id;
                            __entry->ring = rq->engine->id;
                            __entry->ctx = rq->fence.context;
                            __entry->seqno = rq->fence.seqno;
@@ -696,7 +696,7 @@ TRACE_EVENT(i915_request_in,
 
             TP_fast_assign(
                            __entry->dev = rq->i915->drm.primary->index;
-                           __entry->hw_id = rq->ctx->hw_id;
+                           __entry->hw_id = rq->gem_context->hw_id;
                            __entry->ring = rq->engine->id;
                            __entry->ctx = rq->fence.context;
                            __entry->seqno = rq->fence.seqno;
@@ -727,7 +727,7 @@ TRACE_EVENT(i915_request_out,
 
             TP_fast_assign(
                            __entry->dev = rq->i915->drm.primary->index;
-                           __entry->hw_id = rq->ctx->hw_id;
+                           __entry->hw_id = rq->gem_context->hw_id;
                            __entry->ring = rq->engine->id;
                            __entry->ctx = rq->fence.context;
                            __entry->seqno = rq->fence.seqno;
@@ -815,7 +815,7 @@ TRACE_EVENT(i915_request_wait_begin,
              */
             TP_fast_assign(
                            __entry->dev = rq->i915->drm.primary->index;
-                           __entry->hw_id = rq->ctx->hw_id;
+                           __entry->hw_id = rq->gem_context->hw_id;
                            __entry->ring = rq->engine->id;
                            __entry->ctx = rq->fence.context;
                            __entry->seqno = rq->fence.seqno;
@@ -1020,7 +1020,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
          */
         rq = __i915_gem_active_peek(&engine->timeline.last_request);
         if (rq)
-                return rq->ctx == kernel_context;
+                return rq->gem_context == kernel_context;
         else
                 return engine->last_retired_context == kernel_context;
 }
@@ -513,8 +513,9 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 {
         struct intel_guc_client *client = guc->execbuf_client;
         struct intel_engine_cs *engine = rq->engine;
-        u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(rq->ctx,
-                                                                 engine));
+        u32 ctx_desc =
+                lower_32_bits(intel_lr_context_descriptor(rq->gem_context,
+                                                          engine));
         u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
 
         spin_lock(&client->wq_lock);
@@ -725,7 +726,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
                 struct i915_request *rq, *rn;
 
                 list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
-                        if (last && rq->ctx != last->ctx) {
+                        if (last && rq->gem_context != last->gem_context) {
                                 if (port == last_port) {
                                         __list_del_many(&p->requests,
                                                         &rq->sched.link);
@@ -418,9 +418,10 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 
 static u64 execlists_update_context(struct i915_request *rq)
 {
-        struct intel_context *ce = to_intel_context(rq->ctx, rq->engine);
+        struct intel_context *ce =
+                to_intel_context(rq->gem_context, rq->engine);
         struct i915_hw_ppgtt *ppgtt =
-                rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+                rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
         u32 *reg_state = ce->lrc_reg_state;
 
         reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
@@ -679,7 +680,8 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
                          * second request, and so we never need to tell the
                          * hardware about the first.
                          */
-                        if (last && !can_merge_ctx(rq->ctx, last->ctx)) {
+                        if (last && !can_merge_ctx(rq->gem_context,
+                                                   last->gem_context)) {
                                 /*
                                  * If we are on the second port and cannot
                                  * combine this request with the last, then we
@@ -698,14 +700,14 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
                                  * the same context (even though a different
                                  * request) to the second port.
                                  */
-                                if (ctx_single_port_submission(last->ctx) ||
-                                    ctx_single_port_submission(rq->ctx)) {
+                                if (ctx_single_port_submission(last->gem_context) ||
+                                    ctx_single_port_submission(rq->gem_context)) {
                                         __list_del_many(&p->requests,
                                                         &rq->sched.link);
                                         goto done;
                                 }
 
-                                GEM_BUG_ON(last->ctx == rq->ctx);
+                                GEM_BUG_ON(last->gem_context == rq->gem_context);
 
                                 if (submit)
                                         port_assign(port, last);
@@ -1437,7 +1439,7 @@ static void execlists_context_unpin(struct intel_engine_cs *engine,
 static int execlists_request_alloc(struct i915_request *request)
 {
         struct intel_context *ce =
-                to_intel_context(request->ctx, request->engine);
+                to_intel_context(request->gem_context, request->engine);
         int ret;
 
         GEM_BUG_ON(!ce->pin_count);
@@ -1954,7 +1956,7 @@ static void execlists_reset(struct intel_engine_cs *engine,
          * future request will be after userspace has had the opportunity
          * to recreate its own state.
          */
-        regs = to_intel_context(request->ctx, engine)->lrc_reg_state;
+        regs = to_intel_context(request->gem_context, engine)->lrc_reg_state;
         if (engine->default_state) {
                 void *defaults;
 
@@ -1967,7 +1969,8 @@ static void execlists_reset(struct intel_engine_cs *engine,
                         i915_gem_object_unpin_map(engine->default_state);
                 }
         }
-        execlists_init_reg_state(regs, request->ctx, engine, request->ring);
+        execlists_init_reg_state(regs,
+                                 request->gem_context, engine, request->ring);
 
         /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
         regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(request->ring->vma);
@@ -1989,7 +1992,7 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
 
 static int intel_logical_ring_emit_pdps(struct i915_request *rq)
 {
-        struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+        struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
         struct intel_engine_cs *engine = rq->engine;
         const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
         u32 *cs;
@@ -2028,15 +2031,15 @@ static int gen8_emit_bb_start(struct i915_request *rq,
          * it is unsafe in case of lite-restore (because the ctx is
          * not idle). PML4 is allocated during ppgtt init so this is
          * not needed in 48-bit.*/
-        if (rq->ctx->ppgtt &&
-            (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) &&
-            !i915_vm_is_48bit(&rq->ctx->ppgtt->base) &&
+        if (rq->gem_context->ppgtt &&
+            (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
+            !i915_vm_is_48bit(&rq->gem_context->ppgtt->base) &&
             !intel_vgpu_active(rq->i915)) {
                 ret = intel_logical_ring_emit_pdps(rq);
                 if (ret)
                         return ret;
 
-                rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
+                rq->gem_context->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
         }
 
         cs = intel_ring_begin(rq, 6);
@@ -571,8 +571,8 @@ static void reset_ring(struct intel_engine_cs *engine,
          */
         if (request) {
                 struct drm_i915_private *dev_priv = request->i915;
-                struct intel_context *ce = to_intel_context(request->ctx,
-                                                            engine);
+                struct intel_context *ce =
+                        to_intel_context(request->gem_context, engine);
                 struct i915_hw_ppgtt *ppgtt;
 
                 if (ce->state) {
@@ -584,7 +584,7 @@ static void reset_ring(struct intel_engine_cs *engine,
                                    CCID_EN);
                 }
 
-                ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
+                ppgtt = request->gem_context->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
                 if (ppgtt) {
                         u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
 
@@ -1458,7 +1458,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 
         *cs++ = MI_NOOP;
         *cs++ = MI_SET_CONTEXT;
-        *cs++ = i915_ggtt_offset(to_intel_context(rq->ctx, engine)->state) | flags;
+        *cs++ = i915_ggtt_offset(to_intel_context(rq->gem_context, engine)->state) | flags;
         /*
          * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
          * WaMiSetContext_Hang:snb,ivb,vlv
@@ -1526,7 +1526,7 @@ static int remap_l3(struct i915_request *rq, int slice)
 static int switch_context(struct i915_request *rq)
 {
         struct intel_engine_cs *engine = rq->engine;
-        struct i915_gem_context *to_ctx = rq->ctx;
+        struct i915_gem_context *to_ctx = rq->gem_context;
         struct i915_hw_ppgtt *to_mm =
                 to_ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
         struct i915_gem_context *from_ctx = engine->legacy_active_context;
@@ -1597,7 +1597,7 @@ static int ring_request_alloc(struct i915_request *request)
 {
         int ret;
 
-        GEM_BUG_ON(!to_intel_context(request->ctx, request->engine)->pin_count);
+        GEM_BUG_ON(!to_intel_context(request->gem_context, request->engine)->pin_count);
 
         /* Flush enough space to reduce the likelihood of waiting after
          * we start building the request - in which case we will just
@@ -105,7 +105,10 @@ static int emit_recurse_batch(struct hang *h,
                               struct i915_request *rq)
 {
         struct drm_i915_private *i915 = h->i915;
-        struct i915_address_space *vm = rq->ctx->ppgtt ? &rq->ctx->ppgtt->base : &i915->ggtt.base;
+        struct i915_address_space *vm =
+                rq->gem_context->ppgtt ?
+                &rq->gem_context->ppgtt->base :
+                &i915->ggtt.base;
         struct i915_vma *hws, *vma;
         unsigned int flags;
         u32 *batch;
@@ -83,7 +83,7 @@ static int emit_recurse_batch(struct spinner *spin,
                               struct i915_request *rq,
                               u32 arbitration_command)
 {
-        struct i915_address_space *vm = &rq->ctx->ppgtt->base;
+        struct i915_address_space *vm = &rq->gem_context->ppgtt->base;
         struct i915_vma *hws, *vma;
         u32 *batch;
         int err;