drm/i915: Reset logical ring contexts' head and tail during GPU reset
Work was getting left behind in LRC contexts during reset. This causes a hang if the GPU is reset when HEAD == TAIL, because the context's ringbuffer head and tail don't get reset and retiring a request doesn't alter them, so the ring still appears full.

Added a function intel_lr_context_reset() to reset head and tail on an LRC and its ringbuffer. Call intel_lr_context_reset() for each context in i915_gem_context_reset() when in execlists mode.

Testcase: igt/pm_rps --run-subtest reset #bdw
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=88096
Signed-off-by: Thomas Daniel <thomas.daniel@intel.com>
Reviewed-by: Dave Gordon <david.s.gordon@intel.com>
[danvet: Flatten control flow in the lrc reset code a notch.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit 3e5b6f05a2
parent b766879106
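To illustrate the failure mode described in the commit message, the stand-alone toy model below shows how ring free space depends on HEAD advancing as requests retire; once a reset has destroyed the outstanding requests without also resetting HEAD/TAIL, a full ring can never drain. This is a hedged sketch only: the types, helpers and numbers are invented for the example and are not the driver's actual ring-space code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of the problem described above -- NOT i915 code.  A ring
 * buffer's free space is derived from HEAD (consumer) and TAIL
 * (producer).  HEAD only advances when a request retires; if a GPU
 * reset destroys the outstanding requests but leaves HEAD/TAIL alone,
 * nothing can ever retire, so a full ring stays "full" forever and the
 * next submission waits indefinitely.
 */
struct toy_ring {
        uint32_t head;          /* consumer offset, advanced on retire */
        uint32_t tail;          /* producer offset, advanced on emit   */
        uint32_t size;
        int      outstanding;   /* requests that could still retire    */
};

static int toy_ring_space(const struct toy_ring *rb)
{
        int space = (int)rb->head - (int)rb->tail;

        if (space <= 0)
                space += rb->size;
        return space - 8;       /* keep a small reserve so full != empty */
}

/* Returns true once enough space exists, false if it never will. */
static bool toy_wait_for_space(struct toy_ring *rb, int bytes)
{
        while (toy_ring_space(rb) < bytes) {
                if (rb->outstanding == 0)
                        return false;   /* nothing left to retire: stuck */
                rb->outstanding--;      /* retiring advances HEAD        */
                rb->head = (rb->head + 256) % rb->size;
        }
        return true;
}

int main(void)
{
        /* Ring is nearly full; a reset has destroyed all requests but
         * left head/tail untouched -- the situation the patch fixes by
         * zeroing head and tail in intel_lr_context_reset(). */
        struct toy_ring rb = { .head = 64, .tail = 56, .size = 4096,
                               .outstanding = 0 };

        printf("space available: %d\n", toy_ring_space(&rb));
        printf("submission %s\n",
               toy_wait_for_space(&rb, 512) ? "proceeds" : "would hang");
        return 0;
}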
@@ -296,11 +296,15 @@ void i915_gem_context_reset(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	/* In execlists mode we will unreference the context when the execlist
-	 * queue is cleared and the requests destroyed.
-	 */
-	if (i915.enable_execlists)
+	if (i915.enable_execlists) {
+		struct intel_context *ctx;
+
+		list_for_each_entry(ctx, &dev_priv->context_list, link) {
+			intel_lr_context_reset(dev, ctx);
+		}
+
 		return;
+	}
 
 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		struct intel_engine_cs *ring = &dev_priv->ring[i];
@@ -1991,3 +1991,38 @@ error_unpin_ctx:
 	drm_gem_object_unreference(&ctx_obj->base);
 	return ret;
 }
+
+void intel_lr_context_reset(struct drm_device *dev,
+			struct intel_context *ctx)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int i;
+
+	for_each_ring(ring, dev_priv, i) {
+		struct drm_i915_gem_object *ctx_obj =
+				ctx->engine[ring->id].state;
+		struct intel_ringbuffer *ringbuf =
+				ctx->engine[ring->id].ringbuf;
+		uint32_t *reg_state;
+		struct page *page;
+
+		if (!ctx_obj)
+			continue;
+
+		if (i915_gem_object_get_pages(ctx_obj)) {
+			WARN(1, "Failed get_pages for context obj\n");
+			continue;
+		}
+		page = i915_gem_object_get_page(ctx_obj, 1);
+		reg_state = kmap_atomic(page);
+
+		reg_state[CTX_RING_HEAD+1] = 0;
+		reg_state[CTX_RING_TAIL+1] = 0;
+
+		kunmap_atomic(reg_state);
+
+		ringbuf->head = 0;
+		ringbuf->tail = 0;
+	}
+}
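A note on the reg_state[CTX_RING_HEAD+1] and reg_state[CTX_RING_TAIL+1] writes in the hunk above: the logical ring context image kept in page 1 of the context object is a flat array of (register offset, value) pairs, so the slot after CTX_RING_HEAD holds the value the hardware loads into RING_HEAD on context restore. The tiny helper below is an illustration of that convention only; its name is hypothetical and it is not an i915 function.

#include <stdint.h>

/*
 * Illustration only -- not an i915 helper.  The LRC register state is a
 * list of (register offset, value) pairs, so for an entry whose offset
 * lives at index idx, the value restored by the GPU lives at idx + 1.
 * That is why the patch zeroes CTX_RING_HEAD+1 / CTX_RING_TAIL+1 rather
 * than the unsuffixed indices.
 */
static inline void lrc_set_reg_value(uint32_t *reg_state, int idx,
				     uint32_t value)
{
	reg_state[idx + 1] = value;	/* reg_state[idx] is the register offset */
}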
@@ -73,6 +73,8 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 					     struct intel_engine_cs *ring);
 void intel_lr_context_unpin(struct intel_engine_cs *ring,
 		struct intel_context *ctx);
+void intel_lr_context_reset(struct drm_device *dev,
+			struct intel_context *ctx);
 
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);