diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3db5851756f0..7b871802ae36 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -372,7 +372,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value |= I915_SCHEDULER_CAP_ENABLED;
 		value |= I915_SCHEDULER_CAP_PRIORITY;
 
-		if (INTEL_INFO(dev_priv)->has_logical_ring_preemption &&
+		if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
 		    i915_modparams.enable_execlists &&
 		    !i915_modparams.enable_guc_submission)
 			value |= I915_SCHEDULER_CAP_PREEMPTION;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 366ba74b0ad2..61c155cbf9d7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3140,6 +3140,8 @@ intel_info(const struct drm_i915_private *dev_priv)
 
 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
 		((dev_priv)->info.has_logical_ring_contexts)
+#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
+		((dev_priv)->info.has_logical_ring_preemption)
 #define USES_PPGTT(dev_priv)		(i915_modparams.enable_ppgtt)
 #define USES_FULL_PPGTT(dev_priv)	(i915_modparams.enable_ppgtt >= 2)
 #define USES_FULL_48BIT_PPGTT(dev_priv)	(i915_modparams.enable_ppgtt == 3)
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index fedb839dff61..3ac876ca6cae 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -620,7 +620,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
	 * Similarly the preempt context must always be available so that
	 * we can interrupt the engine at any time.
	 */
-	if (INTEL_INFO(engine->i915)->has_logical_ring_preemption) {
+	if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
 		ring = engine->context_pin(engine,
 					   engine->i915->preempt_context);
 		if (IS_ERR(ring)) {
@@ -651,7 +651,7 @@ err_rs_fini:
 err_breadcrumbs:
 	intel_engine_fini_breadcrumbs(engine);
 err_unpin_preempt:
-	if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+	if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
 		engine->context_unpin(engine, engine->i915->preempt_context);
 err_unpin_kernel:
 	engine->context_unpin(engine, engine->i915->kernel_context);
@@ -679,7 +679,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	intel_engine_cleanup_cmd_parser(engine);
 	i915_gem_batch_pool_fini(&engine->batch_pool);
 
-	if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+	if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
 		engine->context_unpin(engine, engine->i915->preempt_context);
 	engine->context_unpin(engine, engine->i915->kernel_context);
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 599c709fc5a7..b5d382ef8d85 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -354,7 +354,7 @@ static void unwind_wa_tail(struct drm_i915_gem_request *rq)
 	assert_ring_tail_valid(rq->ring, rq->tail);
 }
 
-static void unwind_incomplete_requests(struct intel_engine_cs *engine)
+static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *rq, *rn;
 	struct i915_priolist *uninitialized_var(p);
@@ -385,6 +385,17 @@ static void unwind_incomplete_requests(struct intel_engine_cs *engine)
 	}
 }
 
+static void
+execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
+{
+	struct intel_engine_cs *engine =
+		container_of(execlists, typeof(*engine), execlists);
+
+	spin_lock_irq(&engine->timeline->lock);
+	__unwind_incomplete_requests(engine);
+	spin_unlock_irq(&engine->timeline->lock);
+}
+
 static inline void
 execlists_context_status_change(struct drm_i915_gem_request *rq,
 				unsigned long status)
@@ -515,11 +526,6 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
 	elsp_write(ce->lrc_desc, elsp);
 }
 
-static bool can_preempt(struct intel_engine_cs *engine)
-{
-	return INTEL_INFO(engine->i915)->has_logical_ring_preemption;
-}
-
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -567,7 +573,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		if (port_count(&port[0]) > 1)
 			goto unlock;
 
-		if (can_preempt(engine) &&
+		if (HAS_LOGICAL_RING_PREEMPTION(engine->i915) &&
 		    rb_entry(rb, struct i915_priolist, node)->priority >
 		    max(last->priotree.priority, 0)) {
 			/*
@@ -691,7 +697,7 @@ unlock:
 }
 
 static void
-execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
+execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 {
 	struct execlist_port *port = execlists->port;
 	unsigned int num_ports = execlists_num_ports(execlists);
@@ -718,7 +724,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	spin_lock_irqsave(&engine->timeline->lock, flags);
 
 	/* Cancel the requests on the HW and clear the ELSP tracker. */
-	execlist_cancel_port_requests(execlists);
+	execlists_cancel_port_requests(execlists);
 
 	/* Mark all executing requests as skipped. */
 	list_for_each_entry(rq, &engine->timeline->requests, link) {
@@ -858,11 +864,8 @@ static void intel_lrc_irq_handler(unsigned long data)
 
 			if (status & GEN8_CTX_STATUS_ACTIVE_IDLE &&
 			    buf[2*head + 1] == PREEMPT_ID) {
-				execlist_cancel_port_requests(execlists);
-
-				spin_lock_irq(&engine->timeline->lock);
-				unwind_incomplete_requests(engine);
-				spin_unlock_irq(&engine->timeline->lock);
+				execlists_cancel_port_requests(execlists);
+				execlists_unwind_incomplete_requests(execlists);
 
 				GEM_BUG_ON(!execlists_is_active(execlists,
 								EXECLISTS_ACTIVE_PREEMPT));
@@ -1531,10 +1534,10 @@ static void reset_common_ring(struct intel_engine_cs *engine,
	 * guessing the missed context-switch events by looking at what
	 * requests were completed.
	 */
-	execlist_cancel_port_requests(execlists);
+	execlists_cancel_port_requests(execlists);
 
 	/* Push back any incomplete requests for replay after the reset. */
-	unwind_incomplete_requests(engine);
+	__unwind_incomplete_requests(engine);
 
 	spin_unlock_irqrestore(&engine->timeline->lock, flags);
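A note on the new wrapper: execlists_unwind_incomplete_requests() receives only
the embedded execlists state and recovers its owning engine with container_of()
before taking the timeline lock, so tasklet-side callers never need an engine
pointer of their own. For anyone unfamiliar with that pattern, here is a
minimal, self-contained userspace sketch; the struct names and the simplified
container_of() below are illustrative stand-ins, not the real i915 or kernel
definitions:

	/* Recover a pointer to an enclosing struct from a pointer to one
	 * of its embedded members, by subtracting the member's offset.
	 * (Hypothetical types for illustration only.)
	 */
	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct execlists_state { int port_count; };

	struct engine {
		const char *name;
		struct execlists_state execlists;	/* embedded, not a pointer */
	};

	int main(void)
	{
		struct engine eng = { .name = "rcs0" };
		struct execlists_state *el = &eng.execlists;

		/* Given only 'el', recover the engine that contains it. */
		struct engine *owner = container_of(el, struct engine, execlists);

		printf("%s\n", owner->name);	/* prints "rcs0" */
		return 0;
	}

The same pointer arithmetic is what lets the new helper expose a
struct intel_engine_execlists * API while the unwinding itself still operates
on the engine and its timeline lock.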