drm/i915: Drop schedule_back from psr_exit
It doesn't make sense to never again schedule the work, since by the time we might want to re-enable psr the world might have changed and we can do it again. The only exception is when we shut down the pipe, but that's an entirely different thing and needs to be handled in psr_disable. Note that a later patch will again split psr_exit into psr_invalidate and psr_flush. However, that split is different, and this simplification helps with the transition. v2: Improve the commit message a bit. Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
This commit is contained in:
parent
e6e559d4a9
commit
3108e99ea9
|
@ -1395,7 +1395,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
|
|||
goto unlock;
|
||||
}
|
||||
|
||||
intel_edp_psr_exit(dev, true);
|
||||
intel_edp_psr_exit(dev);
|
||||
|
||||
/* Try to flush the object off the GPU without holding the lock.
|
||||
* We will repeat the flush holding the lock in the normal manner
|
||||
|
@ -1442,7 +1442,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_edp_psr_exit(dev, true);
|
||||
intel_edp_psr_exit(dev);
|
||||
|
||||
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
|
||||
if (&obj->base == NULL) {
|
||||
|
@ -4236,7 +4236,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_edp_psr_exit(dev, true);
|
||||
intel_edp_psr_exit(dev);
|
||||
|
||||
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
|
||||
if (&obj->base == NULL) {
|
||||
|
|
|
@ -8820,7 +8820,7 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
|
|||
struct drm_device *dev = obj->base.dev;
|
||||
struct drm_crtc *crtc;
|
||||
|
||||
intel_edp_psr_exit(dev, true);
|
||||
intel_edp_psr_exit(dev);
|
||||
|
||||
if (!i915.powersave)
|
||||
return;
|
||||
|
@ -9430,7 +9430,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
|
|||
return -ENOMEM;
|
||||
|
||||
/* Exit PSR early in page flip */
|
||||
intel_edp_psr_exit(dev, true);
|
||||
intel_edp_psr_exit(dev);
|
||||
|
||||
work->event = event;
|
||||
work->crtc = crtc;
|
||||
|
|
|
@ -1904,7 +1904,7 @@ static void intel_edp_psr_inactivate(struct drm_device *dev)
|
|||
& ~EDP_PSR_ENABLE);
|
||||
}
|
||||
|
||||
void intel_edp_psr_exit(struct drm_device *dev, bool schedule_back)
|
||||
void intel_edp_psr_exit(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
|
@ -1919,7 +1919,6 @@ void intel_edp_psr_exit(struct drm_device *dev, bool schedule_back)
|
|||
if (dev_priv->psr.active)
|
||||
intel_edp_psr_inactivate(dev);
|
||||
|
||||
if (schedule_back)
|
||||
schedule_delayed_work(&dev_priv->psr.work,
|
||||
msecs_to_jiffies(100));
|
||||
}
|
||||
|
|
|
@ -835,7 +835,7 @@ void intel_edp_panel_off(struct intel_dp *intel_dp);
|
|||
void intel_edp_psr_enable(struct intel_dp *intel_dp);
|
||||
void intel_edp_psr_disable(struct intel_dp *intel_dp);
|
||||
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
|
||||
void intel_edp_psr_exit(struct drm_device *dev, bool schedule_back);
|
||||
void intel_edp_psr_exit(struct drm_device *dev);
|
||||
void intel_edp_psr_init(struct drm_device *dev);
|
||||
|
||||
|
||||
|
|
|
@ -1051,7 +1051,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|||
mutex_unlock(&dev->struct_mutex);
|
||||
}
|
||||
|
||||
intel_edp_psr_exit(dev, true);
|
||||
intel_edp_psr_exit(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue