drm/i915: Remove delay for idle_work

The original intent for the delay before running the idle_work was to
provide a hysteresis to avoid ping-ponging the device runtime-pm. Since
then we have also pulled in some memory management and general device
management for parking. But with the inversion of the wakeref handling,
GEM is no longer responsible for the wakeref and by the time we call the
idle_work, the device is asleep. It seems appropriate now to drop the
delay and just run the worker immediately to flush the cached GEM state
before sleeping.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190507121108.18377-2-chris@chris-wilson.co.uk
commit ae2306315f (parent ba0001657f)
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2019-05-07 13:11:06 +01:00
5 changed files with 12 additions and 19 deletions
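
As context for the diff below, here is a minimal, hypothetical sketch of the delayed_work -> work_struct conversion pattern this patch applies. The names (my_dev, on_park, on_teardown, my_dev_init_pm) are invented for illustration and are not i915 code; only the workqueue calls themselves (INIT_WORK, queue_work, flush_work, work_pending) mirror the ones used in the patch.

/*
 * Hypothetical sketch: convert a struct delayed_work queued with a 100ms
 * hysteresis into a plain struct work_struct queued immediately on parking
 * and flushed (rather than drained/cancelled) on teardown.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	struct workqueue_struct *wq;
	struct work_struct idle_work;	/* was: struct delayed_work idle_work; */
};

static void idle_work_handler(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, idle_work);

	/*
	 * If the work was re-queued while this handler was already running,
	 * a wakeup raced with parking; skip for now and let the queued run
	 * repeat the check.
	 */
	if (work_pending(work))
		return;

	pr_info("parking %p: flush cached state, drop power\n", dev);
}

static void on_park(struct my_dev *dev)
{
	/* was: mod_delayed_work(dev->wq, &dev->idle_work, msecs_to_jiffies(100)); */
	queue_work(dev->wq, &dev->idle_work);
}

static void on_teardown(struct my_dev *dev)
{
	/* was: drain/cancel of the delayed work; a plain flush now suffices */
	flush_work(&dev->idle_work);
}

static void my_dev_init_pm(struct my_dev *dev)
{
	/* was: INIT_DELAYED_WORK(&dev->idle_work, idle_work_handler); */
	INIT_WORK(&dev->idle_work, idle_work_handler);
}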

@@ -3949,8 +3949,8 @@ i915_drop_caches_set(void *data, u64 val)
 	if (val & DROP_IDLE) {
 		do {
 			flush_delayed_work(&i915->gem.retire_work);
-			drain_delayed_work(&i915->gem.idle_work);
 		} while (READ_ONCE(i915->gt.awake));
+		flush_work(&i915->gem.idle_work);
 	}
 
 	if (val & DROP_FREED)

@@ -2031,7 +2031,7 @@ struct drm_i915_private {
 		 * arrive within a small period of time, we fire
 		 * off the idle_work.
 		 */
-		struct delayed_work idle_work;
+		struct work_struct idle_work;
 	} gem;
 
 	/* For i945gm vblank irq vs. C3 workaround */

@@ -29,12 +29,12 @@ static void i915_gem_park(struct drm_i915_private *i915)
 static void idle_work_handler(struct work_struct *work)
 {
 	struct drm_i915_private *i915 =
-		container_of(work, typeof(*i915), gem.idle_work.work);
+		container_of(work, typeof(*i915), gem.idle_work);
 
 	mutex_lock(&i915->drm.struct_mutex);
 
 	intel_wakeref_lock(&i915->gt.wakeref);
-	if (!intel_wakeref_active(&i915->gt.wakeref))
+	if (!intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work))
 		i915_gem_park(i915);
 	intel_wakeref_unlock(&i915->gt.wakeref);
@@ -74,9 +74,7 @@ static int pm_notifier(struct notifier_block *nb,
 		break;
 
 	case INTEL_GT_PARK:
-		mod_delayed_work(i915->wq,
-				 &i915->gem.idle_work,
-				 msecs_to_jiffies(100));
+		queue_work(i915->wq, &i915->gem.idle_work);
 		break;
 	}
@@ -142,16 +140,11 @@ void i915_gem_suspend(struct drm_i915_private *i915)
 	 * Assert that we successfully flushed all the work and
 	 * reset the GPU back to its idle, low power state.
 	 */
-	GEM_BUG_ON(i915->gt.awake);
-	cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
 	drain_delayed_work(&i915->gem.retire_work);
+	GEM_BUG_ON(i915->gt.awake);
+	flush_work(&i915->gem.idle_work);
 
-	/*
-	 * As the idle_work is rearming if it detects a race, play safe and
-	 * repeat the flush until it is definitely idle.
-	 */
-	drain_delayed_work(&i915->gem.idle_work);
+	cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
 
 	i915_gem_drain_freed_objects(i915);
@@ -242,7 +235,7 @@ err_wedged:
 
 void i915_gem_init__pm(struct drm_i915_private *i915)
 {
-	INIT_DELAYED_WORK(&i915->gem.idle_work, idle_work_handler);
+	INIT_WORK(&i915->gem.idle_work, idle_work_handler);
 	INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);
 
 	i915->gem.pm_notifier.notifier_call = pm_notifier;

@@ -509,7 +509,7 @@ static void disable_retire_worker(struct drm_i915_private *i915)
 	intel_gt_pm_get(i915);
 
 	cancel_delayed_work_sync(&i915->gem.retire_work);
-	cancel_delayed_work_sync(&i915->gem.idle_work);
+	flush_work(&i915->gem.idle_work);
 }
 
 static void restore_retire_worker(struct drm_i915_private *i915)

@@ -59,7 +59,7 @@ static void mock_device_release(struct drm_device *dev)
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	drain_delayed_work(&i915->gem.retire_work);
-	drain_delayed_work(&i915->gem.idle_work);
+	flush_work(&i915->gem.idle_work);
 	i915_gem_drain_workqueue(i915);
 
 	mutex_lock(&i915->drm.struct_mutex);
@@ -195,7 +195,7 @@ struct drm_i915_private *mock_gem_device(void)
 	mock_init_contexts(i915);
 
 	INIT_DELAYED_WORK(&i915->gem.retire_work, mock_retire_work_handler);
-	INIT_DELAYED_WORK(&i915->gem.idle_work, mock_idle_work_handler);
+	INIT_WORK(&i915->gem.idle_work, mock_idle_work_handler);
 
 	i915->gt.awake = true;