drm/i915: Remove forced stop ring on suspend/unload

Before suspending (or unloading), we would first wait upon all rendering
to be completed and then disable the rings. This latter step is a remnant
from DRI1 days when we did not use request tracking for all operations
upon the ring. Now that we are sure we are waiting upon the very last
operation by the engine, we can forgo clobbering the ring registers,
though we do keep the assert that the engine is indeed idle before
sleeping.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470388464-28458-5-git-send-email-chris@chris-wilson.co.uk
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date: 2016-08-05 10:14:10 +01:00
Commit: 90f4fcd56b (parent: f826ee21e5)
4 changed files with 0 additions and 61 deletions
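
As the commit message above notes, the suspend/unload path now relies on request tracking to drain the engines and merely asserts idleness during cleanup instead of forcing STOP_RING. For orientation, here is a minimal sketch of the cleanup-side check that remains after this patch, reconstructed from the hunks below (the identifiers are the driver's own; the rest of the function body is elided, so this is a sketch rather than a verbatim copy of the resulting file):

/* Sketch: after this patch, engine cleanup only asserts that the
 * hardware already reports idle via RING_MI_MODE, rather than
 * writing STOP_RING to force it.
 */
void intel_engine_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (engine->buffer) {
		/* Every request was retired before we get here, so the ring
		 * must already be idle; gen2 is excluded as in the original
		 * assert.
		 */
		WARN_ON(!IS_GEN2(dev_priv) &&
			(I915_READ_MODE(engine) & MODE_IDLE) == 0);

		intel_ring_unpin(engine->buffer);
		/* ... rest of cleanup unchanged by this patch ... */
	}
}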

drivers/gpu/drm/i915/i915_drv.h

@@ -2004,7 +2004,6 @@ struct drm_i915_private {
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
void (*cleanup_engine)(struct intel_engine_cs *engine);
void (*stop_engine)(struct intel_engine_cs *engine);
/**
* Is the GPU currently considered idle, or busy executing

drivers/gpu/drm/i915/i915_gem.c

@@ -4080,16 +4080,6 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
return NULL;
}
static void
i915_gem_stop_engines(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
for_each_engine(engine, dev_priv)
dev_priv->gt.stop_engine(engine);
}
int
i915_gem_suspend(struct drm_device *dev)
{
@@ -4118,12 +4108,6 @@ i915_gem_suspend(struct drm_device *dev)
i915_gem_retire_requests(dev_priv);
/* Note that rather than stopping the engines, all we have to do
* is assert that every RING_HEAD == RING_TAIL (all execution complete)
* and similar for all logical context images (to ensure they are
* all ready for hibernation).
*/
i915_gem_stop_engines(dev);
i915_gem_context_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -4308,10 +4292,8 @@ int i915_gem_init(struct drm_device *dev)
if (!i915.enable_execlists) {
dev_priv->gt.cleanup_engine = intel_engine_cleanup;
dev_priv->gt.stop_engine = intel_engine_stop;
} else {
dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
dev_priv->gt.stop_engine = intel_logical_ring_stop;
}
/* This is just a security blanket to placate dragons.

drivers/gpu/drm/i915/intel_lrc.c

@@ -760,31 +760,6 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
}
}
void intel_logical_ring_stop(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
if (!intel_engine_initialized(engine))
return;
ret = intel_engine_idle(engine);
if (ret)
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
engine->name, ret);
/* TODO: Is this correct with Execlists enabled? */
I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
if (intel_wait_for_register(dev_priv,
RING_MI_MODE(engine->mmio_base),
MODE_IDLE, MODE_IDLE,
1000)) {
DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
return;
}
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}
static int intel_lr_context_pin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
@@ -1717,7 +1692,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
dev_priv = engine->i915;
if (engine->buffer) {
intel_logical_ring_stop(engine);
WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
}

drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -2203,7 +2203,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
dev_priv = engine->i915;
if (engine->buffer) {
intel_engine_stop(engine);
WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
intel_ring_unpin(engine->buffer);
@@ -2907,18 +2906,3 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
return intel_init_ring_buffer(engine);
}
void intel_engine_stop(struct intel_engine_cs *engine)
{
int ret;
if (!intel_engine_initialized(engine))
return;
ret = intel_engine_idle(engine);
if (ret)
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
engine->name, ret);
stop_ring(engine);
}
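
A side note on the register writes deleted above: STOP_RING and MODE_IDLE are bits in RING_MI_MODE, one of i915's "masked" registers, where the upper 16 bits of a write select which of the lower 16 bits take effect. Simplified forms of the driver's helper macros are shown below purely to make the removed I915_WRITE_MODE() calls easier to read; the in-tree definitions add extra sanity checks.

/* Masked-register idiom: bits 31:16 act as a write-enable mask for
 * bits 15:0, so each write touches only the named bit(s).
 */
#define _MASKED_FIELD(mask, value)   (((mask) << 16) | (value))
#define _MASKED_BIT_ENABLE(a)        _MASKED_FIELD((a), (a))  /* set bit  */
#define _MASKED_BIT_DISABLE(a)       _MASKED_FIELD((a), 0)    /* clear bit */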