drm/i915: Cancel retire_worker on parking

Replace the racy continuation check within retire_work with a definite
kill-switch on idling. The race was being exposed by
gem_concurrent_blit, where the retire_worker would be terminated too
early, leaving us spinning in debugfs/i915_drop_caches with nothing
flushing the retirement queue.
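
To illustrate, here is a minimal sketch (a hypothetical toy_dev, not
the i915 code itself) of the racy self-rearming pattern this patch
removes:

	#include <linux/kernel.h>
	#include <linux/timer.h>
	#include <linux/workqueue.h>

	/* Hypothetical device state, for illustration only. */
	struct toy_dev {
		struct delayed_work retire_work;
		bool active;
	};

	static void toy_retire_handler(struct work_struct *work)
	{
		struct toy_dev *dev =
			container_of(work, typeof(*dev), retire_work.work);

		/* ... retire completed requests ... */

		/*
		 * Racy continuation check: a submission that lands after
		 * this test but before the worker exits is never noticed,
		 * so nothing re-queues the worker and the retirement
		 * queue is left unflushed.
		 */
		if (READ_ONCE(dev->active))
			schedule_delayed_work(&dev->retire_work,
					      round_jiffies_up_relative(HZ));
	}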

That the igt is trying to idle from one child while submitting from
another may be a contributing factor as to why it runs so slowly...

v2: Use the non-sync version of cancel_delayed_work(); we only need to
stop it from being scheduled, as we independently check whether now is
the right time to be parking.
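
For context, a sketch of why the non-sync cancel suffices (same
hypothetical toy_dev as above, extended with an idle_work member):
cancel_delayed_work() only dequeues a pending work item; unlike
cancel_delayed_work_sync() it does not wait for a handler that is
already executing, which is fine because the parking decision is
re-checked here anyway.

	/* Assumes toy_dev also has: struct work_struct idle_work; */
	static void toy_idle_handler(struct work_struct *work)
	{
		struct toy_dev *dev =
			container_of(work, typeof(*dev), idle_work);
		bool restart = true;

		/*
		 * Definite kill-switch: stop the retire worker from being
		 * scheduled again; a concurrent run may still finish.
		 */
		cancel_delayed_work(&dev->retire_work);

		if (!READ_ONCE(dev->active) && !work_pending(work)) {
			/* ... park the device: flush and power down ... */
			restart = false;
		}

		if (restart)	/* raced with new activity; keep retiring */
			schedule_delayed_work(&dev->retire_work,
					      round_jiffies_up_relative(HZ));
	}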

Testcase: igt/gem_concurrent_blit
Fixes: 79ffac8599 ("drm/i915: Invert the GEM wakeref hierarchy")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190507121108.18377-3-chris@chris-wilson.co.uk
2 files changed, 12 insertions(+), 7 deletions(-)


@@ -30,15 +30,23 @@ static void idle_work_handler(struct work_struct *work)
 {
 	struct drm_i915_private *i915 =
 		container_of(work, typeof(*i915), gem.idle_work);
+	bool restart = true;
 
+	cancel_delayed_work(&i915->gem.retire_work);
 	mutex_lock(&i915->drm.struct_mutex);
 
 	intel_wakeref_lock(&i915->gt.wakeref);
-	if (!intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work))
+	if (!intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work)) {
 		i915_gem_park(i915);
+		restart = false;
+	}
 	intel_wakeref_unlock(&i915->gt.wakeref);
 
 	mutex_unlock(&i915->drm.struct_mutex);
+	if (restart)
+		queue_delayed_work(i915->wq,
+				   &i915->gem.retire_work,
+				   round_jiffies_up_relative(HZ));
 }
 
 static void retire_work_handler(struct work_struct *work)
@@ -52,7 +60,6 @@ static void retire_work_handler(struct work_struct *work)
 		mutex_unlock(&i915->drm.struct_mutex);
 	}
 
-	if (intel_wakeref_active(&i915->gt.wakeref))
 	queue_delayed_work(i915->wq,
 			   &i915->gem.retire_work,
 			   round_jiffies_up_relative(HZ));
@@ -140,7 +147,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
 	 * Assert that we successfully flushed all the work and
 	 * reset the GPU back to its idle, low power state.
 	 */
-	drain_delayed_work(&i915->gem.retire_work);
 	GEM_BUG_ON(i915->gt.awake);
 	flush_work(&i915->gem.idle_work);
 


@@ -58,7 +58,6 @@ static void mock_device_release(struct drm_device *dev)
 	i915_gem_contexts_lost(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
-	drain_delayed_work(&i915->gem.retire_work);
 	flush_work(&i915->gem.idle_work);
 	i915_gem_drain_workqueue(i915);