drm/i915: fix context/engine cleanup order
Swap the order of context & engine cleanup, so that contexts are
cleaned up first, and *then* engines. This is a more sensible order
anyway, but in particular has become necessary since the
'intel_ring_initialized() must be simple and inline' patch, which now
uses ring->dev as an 'initialised' flag, so it can now be NULL after
engine teardown. This in turn can cause a problem in the context code,
which (used to) check ring->dev->struct_mutex -- causing a fault if
ring->dev was NULL.

Also rename the cleanup function to reflect what it actually does
(cleanup engines, not a ringbuffer), and fix an annoying whitespace
issue.

v2: Also make the fix in i915_load_modeset_init, not just in
    i915_driver_unload (Chris Wilson)

v3: Had extra stuff in it.

v4: Reverted extra stuff (so we're back to v2). Rebased and updated
    commentary above (Dave Gordon).

Signed-off-by: Nick Hoath <nicholas.hoath@intel.com>
Signed-off-by: David Gordon <david.s.gordon@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1453504211-7982-2-git-send-email-david.s.gordon@intel.com
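For illustration only, a minimal self-contained C sketch of the ordering
hazard described above. The types and helpers here (fake_dev, fake_ring,
cleanup_engines, context_fini) are hypothetical stand-ins, not the real
i915 structures or functions; they only model the idea that engine
teardown clears ring->dev (the 'initialised' flag), so any context
cleanup run afterwards that dereferences ring->dev->struct_mutex would
fault. Cleaning contexts up first never touches a NULL ring->dev.

	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	struct fake_dev { int struct_mutex; };       /* stand-in for drm_device */
	struct fake_ring { struct fake_dev *dev; };  /* stand-in for intel_engine_cs */

	static int ring_initialized(struct fake_ring *ring)
	{
		return ring->dev != NULL;            /* dev doubles as the init flag */
	}

	static void cleanup_engines(struct fake_ring *ring)
	{
		ring->dev = NULL;                    /* engine teardown clears dev */
	}

	static void context_fini(struct fake_ring *ring)
	{
		/* Faults if engines were torn down first and dev is NULL. */
		assert(ring->dev != NULL);
		(void)ring->dev->struct_mutex;
	}

	int main(void)
	{
		struct fake_dev dev = { 0 };
		struct fake_ring ring = { &dev };

		/* Fixed order: contexts first, then engines. */
		context_fini(&ring);
		cleanup_engines(&ring);
		printf("initialised after teardown: %d\n", ring_initialized(&ring));
		return 0;
	}

Reversing the two calls in main() trips the assert, which is the
failure mode the swapped cleanup order in this patch avoids.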
@@ -444,8 +444,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
 cleanup_gem:
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
+	i915_gem_cleanup_engines(dev);
 	mutex_unlock(&dev->struct_mutex);
 cleanup_irq:
 	intel_guc_ucode_fini(dev);
@@ -1256,8 +1256,8 @@ int i915_driver_unload(struct drm_device *dev)
 	intel_guc_ucode_fini(dev);
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
+	i915_gem_cleanup_engines(dev);
 	mutex_unlock(&dev->struct_mutex);
 	intel_fbc_cleanup_cfb(dev_priv);
@@ -3058,7 +3058,7 @@ int i915_gem_init_rings(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
-void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+void i915_gem_cleanup_engines(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct drm_i915_gem_request *req,
@@ -4913,7 +4913,7 @@ i915_gem_init_hw(struct drm_device *dev)
 		req = i915_gem_request_alloc(ring, NULL);
 		if (IS_ERR(req)) {
 			ret = PTR_ERR(req);
-			i915_gem_cleanup_ringbuffer(dev);
+			i915_gem_cleanup_engines(dev);
 			goto out;
 		}
@@ -4926,7 +4926,7 @@ i915_gem_init_hw(struct drm_device *dev)
 		if (ret && ret != -EIO) {
 			DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
 			i915_gem_request_cancel(req);
-			i915_gem_cleanup_ringbuffer(dev);
+			i915_gem_cleanup_engines(dev);
 			goto out;
 		}
@@ -4934,7 +4934,7 @@ i915_gem_init_hw(struct drm_device *dev)
 		if (ret && ret != -EIO) {
 			DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
 			i915_gem_request_cancel(req);
-			i915_gem_cleanup_ringbuffer(dev);
+			i915_gem_cleanup_engines(dev);
 			goto out;
 		}
@@ -5009,7 +5009,7 @@ out_unlock:
 }
 
 void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+i915_gem_cleanup_engines(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
@@ -5018,13 +5018,14 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 	for_each_ring(ring, dev_priv, i)
 		dev_priv->gt.cleanup_ring(ring);
 
-	if (i915.enable_execlists)
-		/*
-		 * Neither the BIOS, ourselves or any other kernel
-		 * expects the system to be in execlists mode on startup,
-		 * so we need to reset the GPU back to legacy mode.
-		 */
-		intel_gpu_reset(dev);
+	if (i915.enable_execlists) {
+		/*
+		 * Neither the BIOS, ourselves or any other kernel
+		 * expects the system to be in execlists mode on startup,
+		 * so we need to reset the GPU back to legacy mode.
+		 */
+		intel_gpu_reset(dev);
+	}
 }
 
 static void