drm/i915: Lift intel_engines_resume() to callers

Since the reset path wants to recover the engines itself, it only wants
to reinitialise the hardware using i915_gem_init_hw(). Pull the call to
intel_engines_resume() to the module init/resume path so we can avoid it
during reset.

Fixes: 79ffac8599 ("drm/i915: Invert the GEM wakeref hierarchy")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190626154549.10066-3-chris@chris-wilson.co.uk
commit 092be382a2 (parent 18398904ca)
Chris Wilson, 2019-06-26 16:45:49 +01:00
7 changed files with 65 additions and 83 deletions
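In short, after this patch the callers own the engine restart: i915_gem_resume() and i915_gem_init() go through intel_gt_resume(), which now takes the engine wakerefs, bumps engine->serial and calls engine->resume() (and can fail), while the reset path runs its own bare resume() helper after i915_gem_init_hw(). A rough sketch of the two flows follows; the *_flow() wrappers are illustrative names only, not functions in the driver — the real callers are shown in the diffs below.

/*
 * Illustrative sketch of the resume paths after this patch; not driver
 * code. Only i915_gem_init_hw(), intel_gt_resume() and the reset code's
 * local resume() helper are real symbols.
 */
static int gem_resume_flow(struct drm_i915_private *i915)
{
	int err;

	/* Reinitialise the hardware first... */
	err = i915_gem_init_hw(i915);
	if (err)
		return err;

	/* ...then restart the engines under their wakerefs. */
	return intel_gt_resume(&i915->gt);
}

static int reset_resume_flow(struct drm_i915_private *i915)
{
	int err;

	err = i915_gem_init_hw(i915);
	if (err)
		return err;

	/* The reset path only needs the bare engine->resume() walk. */
	return resume(i915);
}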


@@ -254,14 +254,15 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	i915_gem_restore_gtt_mappings(i915);
 	i915_gem_restore_fences(i915);
 
+	if (i915_gem_init_hw(i915))
+		goto err_wedged;
+
 	/*
 	 * As we didn't flush the kernel context before suspend, we cannot
 	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
-	intel_gt_resume(&i915->gt);
-
-	if (i915_gem_init_hw(i915))
+	if (intel_gt_resume(&i915->gt))
 		goto err_wedged;
 
 	intel_uc_resume(i915);


@@ -142,27 +142,3 @@ void intel_engine_init__pm(struct intel_engine_cs *engine)
 {
 	intel_wakeref_init(&engine->wakeref);
 }
-
-int intel_engines_resume(struct drm_i915_private *i915)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int err = 0;
-
-	intel_gt_pm_get(&i915->gt);
-	for_each_engine(engine, i915, id) {
-		intel_engine_pm_get(engine);
-		engine->serial++; /* kernel context lost */
-		err = engine->resume(engine);
-		intel_engine_pm_put(engine);
-		if (err) {
-			dev_err(i915->drm.dev,
-				"Failed to restart %s (%d)\n",
-				engine->name, err);
-			break;
-		}
-	}
-	intel_gt_pm_put(&i915->gt);
-
-	return err;
-}


@@ -31,6 +31,4 @@ void intel_engine_park(struct intel_engine_cs *engine);
 
 void intel_engine_init__pm(struct intel_engine_cs *engine);
 
-int intel_engines_resume(struct drm_i915_private *i915);
-
 #endif /* INTEL_ENGINE_PM_H */


@@ -5,6 +5,7 @@
  */
 
 #include "i915_drv.h"
+#include "intel_engine_pm.h"
 #include "intel_gt_pm.h"
 #include "intel_pm.h"
 #include "intel_wakeref.h"
@@ -122,10 +123,11 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force)
 		intel_engine_reset(engine, false);
 }
 
-void intel_gt_resume(struct intel_gt *gt)
+int intel_gt_resume(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
+	int err = 0;
 
 	/*
 	 * After resume, we may need to poke into the pinned kernel
@@ -133,9 +135,12 @@ void intel_gt_resume(struct intel_gt *gt)
 	 * Only the kernel contexts should remain pinned over suspend,
 	 * allowing us to fixup the user contexts on their first pin.
 	 */
+	intel_gt_pm_get(gt);
 	for_each_engine(engine, gt->i915, id) {
 		struct intel_context *ce;
 
+		intel_engine_pm_get(engine);
+
 		ce = engine->kernel_context;
 		if (ce)
 			ce->ops->reset(ce);
@@ -143,5 +148,19 @@ void intel_gt_resume(struct intel_gt *gt)
 		ce = engine->preempt_context;
 		if (ce)
 			ce->ops->reset(ce);
+
+		engine->serial++; /* kernel context lost */
+		err = engine->resume(engine);
+
+		intel_engine_pm_put(engine);
+		if (err) {
+			dev_err(gt->i915->drm.dev,
+				"Failed to restart %s (%d)\n",
+				engine->name, err);
+			break;
+		}
 	}
+	intel_gt_pm_put(gt);
+
+	return err;
 }


@@ -22,6 +22,6 @@ void intel_gt_pm_put(struct intel_gt *gt);
 
 void intel_gt_pm_init_early(struct intel_gt *gt);
 void intel_gt_sanitize(struct intel_gt *gt, bool force);
-void intel_gt_resume(struct intel_gt *gt);
+int intel_gt_resume(struct intel_gt *gt);
 
 #endif /* INTEL_GT_PM_H */


@@ -949,6 +949,21 @@ static int do_reset(struct drm_i915_private *i915,
 	return gt_reset(i915, stalled_mask);
 }
 
+static int resume(struct drm_i915_private *i915)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int ret;
+
+	for_each_engine(engine, i915, id) {
+		ret = engine->resume(engine);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 /**
  * i915_reset - reset chip after a hang
  * @i915: #drm_i915_private to reset
@@ -1023,9 +1038,13 @@ void i915_reset(struct drm_i915_private *i915,
 	if (ret) {
 		DRM_ERROR("Failed to initialise HW following reset (%d)\n",
 			  ret);
-		goto error;
+		goto taint;
 	}
 
+	ret = resume(i915);
+	if (ret)
+		goto taint;
+
 	i915_queue_hangcheck(i915);
 
 finish:


@@ -46,7 +46,6 @@
 #include "gem/i915_gem_ioctls.h"
 #include "gem/i915_gem_pm.h"
 #include "gem/i915_gemfs.h"
-#include "gt/intel_engine_pm.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_mocs.h"
@@ -1192,12 +1191,17 @@ static void init_unused_rings(struct intel_gt *gt)
 	}
 }
 
-static int init_hw(struct intel_gt *gt)
+int i915_gem_init_hw(struct drm_i915_private *i915)
 {
-	struct drm_i915_private *i915 = gt->i915;
-	struct intel_uncore *uncore = gt->uncore;
+	struct intel_uncore *uncore = &i915->uncore;
+	struct intel_gt *gt = &i915->gt;
 	int ret;
 
+	BUG_ON(!i915->kernel_context);
+	ret = i915_terminally_wedged(i915);
+	if (ret)
+		return ret;
+
 	gt->last_init_time = ktime_get();
 
 	/* Double layer security blanket, see i915_gem_init() */
@@ -1248,51 +1252,10 @@ static int init_hw(struct intel_gt *gt)
 	intel_mocs_init_l3cc_table(gt);
 
-	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-
-	return 0;
+	intel_engines_set_scheduler_caps(i915);
 
 out:
 	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
 
-	return ret;
-}
-
-int i915_gem_init_hw(struct drm_i915_private *i915)
-{
-	struct intel_uncore *uncore = &i915->uncore;
-	int ret;
-
-	BUG_ON(!i915->kernel_context);
-	ret = i915_terminally_wedged(i915);
-	if (ret)
-		return ret;
-
-	/* Double layer security blanket, see i915_gem_init() */
-	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-
-	ret = init_hw(&i915->gt);
-	if (ret)
-		goto err_init;
-
-	/* Only when the HW is re-initialised, can we replay the requests */
-	ret = intel_engines_resume(i915);
-	if (ret)
-		goto err_engines;
-
-	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-
-	intel_engines_set_scheduler_caps(i915);
-
-	return 0;
-
-err_engines:
-	intel_uc_fini_hw(i915);
-err_init:
-	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-
-	intel_engines_set_scheduler_caps(i915);
-
 	return ret;
 }
@@ -1524,6 +1487,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	if (ret)
 		goto err_uc_init;
 
+	/* Only when the HW is re-initialised, can we replay the requests */
+	ret = intel_gt_resume(&dev_priv->gt);
+	if (ret)
+		goto err_init_hw;
+
 	/*
 	 * Despite its name intel_init_clock_gating applies both display
 	 * clock gating workarounds; GT mmio workarounds and the occasional
@@ -1537,20 +1505,20 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 
 	ret = intel_engines_verify_workarounds(dev_priv);
 	if (ret)
-		goto err_init_hw;
+		goto err_gt;
 
 	ret = __intel_engines_record_defaults(dev_priv);
 	if (ret)
-		goto err_init_hw;
+		goto err_gt;
 
 	if (i915_inject_load_failure()) {
 		ret = -ENODEV;
-		goto err_init_hw;
+		goto err_gt;
 	}
 
 	if (i915_inject_load_failure()) {
 		ret = -EIO;
-		goto err_init_hw;
+		goto err_gt;
 	}
 
 	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
@@ -1564,7 +1532,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	 * HW as irrevisibly wedged, but keep enough state around that the
 	 * driver doesn't explode during runtime.
 	 */
-err_init_hw:
+err_gt:
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	i915_gem_set_wedged(dev_priv);
@@ -1574,6 +1542,7 @@ err_init_hw:
 	i915_gem_drain_workqueue(dev_priv);
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
+err_init_hw:
 	intel_uc_fini_hw(dev_priv);
 err_uc_init:
 	intel_uc_fini(dev_priv);