drm/i915: Accurately track when we mark the hardware as idle/busy
We currently call intel_mark_idle() too often, as we do so as a
side-effect of processing the request queue. However, the calls to
intel_mark_idle() are expected to be paired with a call to
intel_mark_busy() (or else we try to idle the hardware by accessing
registers that are already disabled). Make the idle/busy tracking
explicit to prevent the multiple calls.

v2: We can drop some of the complexity in __i915_add_request() as
queue_delayed_work() already behaves as we want (not requeuing the item
if it is already in the queue) and mark_busy/mark_idle imply that the
idle task is inactive.

v3: We do still need to cancel the pending idle task so that it is sent
again after the current busy load completes (not in the middle of it).

Reported-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Tested-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit f62a007603
parent e9dbd2b202
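The v2/v3 notes above lean on two properties of delayed work: queue_delayed_work() returns false and does nothing when the work item is already pending, so it can safely be called for every request, while a pending idle task has to be cancelled explicitly so its delay restarts after the new busy load rather than expiring in the middle of it. A minimal, self-contained sketch of that pattern follows; it is a hypothetical demo module, and every name in it is illustrative rather than taken from the driver.

/*
 * Hypothetical demo module (not part of the patch) illustrating the
 * delayed-work behaviour described in the commit message.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct delayed_work demo_retire_work;
static struct delayed_work demo_idle_work;

static void demo_retire(struct work_struct *work) { /* retire completed requests */ }
static void demo_idle(struct work_struct *work)   { /* mark the hardware idle */ }

/* Called for every new request in this sketch. */
static void demo_add_request(void)
{
	/*
	 * Cancel any pending idle timeout so it cannot fire in the middle
	 * of the new busy load; it is re-armed elsewhere once the request
	 * queue drains (not shown here).
	 */
	cancel_delayed_work_sync(&demo_idle_work);

	/*
	 * Safe to call unconditionally: queue_delayed_work() returns false
	 * and does nothing if the retire work is already queued.
	 */
	queue_delayed_work(system_wq, &demo_retire_work,
			   round_jiffies_up_relative(HZ));
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&demo_retire_work, demo_retire);
	INIT_DELAYED_WORK(&demo_idle_work, demo_idle);
	demo_add_request();
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_retire_work);
	cancel_delayed_work_sync(&demo_idle_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");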
drivers/gpu/drm/i915/i915_drv.h

@@ -1124,6 +1124,14 @@ struct i915_gem_mm {
 	 */
 	bool interruptible;
 
+	/**
+	 * Is the GPU currently considered idle, or busy executing userspace
+	 * requests? Whilst idle, we attempt to power down the hardware and
+	 * display clocks. In order to reduce the effect on performance, there
+	 * is a slight delay before we do so.
+	 */
+	bool busy;
+
 	/** Bit 6 swizzling required for X tiling */
 	uint32_t bit_6_swizzle_x;
 	/** Bit 6 swizzling required for Y tiling */
drivers/gpu/drm/i915/i915_gem.c

@@ -2148,7 +2148,6 @@ int __i915_add_request(struct intel_ring_buffer *ring,
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_request *request;
 	u32 request_ring_position, request_start;
-	int was_empty;
 	int ret;
 
 	request_start = intel_ring_get_tail(ring);
@@ -2199,7 +2198,6 @@ int __i915_add_request(struct intel_ring_buffer *ring,
 	i915_gem_context_reference(request->ctx);
 
 	request->emitted_jiffies = jiffies;
-	was_empty = list_empty(&ring->request_list);
 	list_add_tail(&request->list, &ring->request_list);
 	request->file_priv = NULL;
 
@@ -2220,13 +2218,11 @@ int __i915_add_request(struct intel_ring_buffer *ring,
 	if (!dev_priv->ums.mm_suspended) {
 		i915_queue_hangcheck(ring->dev);
 
-		if (was_empty) {
-			cancel_delayed_work_sync(&dev_priv->mm.idle_work);
-			queue_delayed_work(dev_priv->wq,
-					   &dev_priv->mm.retire_work,
-					   round_jiffies_up_relative(HZ));
-			intel_mark_busy(dev_priv->dev);
-		}
+		cancel_delayed_work_sync(&dev_priv->mm.idle_work);
+		queue_delayed_work(dev_priv->wq,
+				   &dev_priv->mm.retire_work,
+				   round_jiffies_up_relative(HZ));
+		intel_mark_busy(dev_priv->dev);
 	}
 
 	if (out_seqno)
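For orientation, here is how the tail of __i915_add_request() reads once the hunks above are applied, condensed from the added lines with comments added here:

	if (!dev_priv->ums.mm_suspended) {
		/* Arm hangcheck for the request we just emitted. */
		i915_queue_hangcheck(ring->dev);

		/* Push the idle timeout out past the new busy load (v3). */
		cancel_delayed_work_sync(&dev_priv->mm.idle_work);

		/*
		 * A no-op if the retire work is already pending (v2), so the
		 * old was_empty guard is no longer needed.
		 */
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));

		/* Cheap when already busy, thanks to the new mm.busy flag. */
		intel_mark_busy(dev_priv->dev);
	}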
drivers/gpu/drm/i915/intel_display.c

@@ -8197,8 +8197,12 @@ void intel_mark_busy(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	if (dev_priv->mm.busy)
+		return;
+
 	hsw_package_c8_gpu_busy(dev_priv);
 	i915_update_gfx_val(dev_priv);
+	dev_priv->mm.busy = true;
 }
 
 void intel_mark_idle(struct drm_device *dev)
@@ -8206,6 +8210,11 @@ void intel_mark_idle(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 
+	if (!dev_priv->mm.busy)
+		return;
+
+	dev_priv->mm.busy = false;
+
 	hsw_package_c8_gpu_idle(dev_priv);
 
 	if (!i915.powersave)
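Below is a minimal userspace model of the guard these two hunks introduce; the names and messages are illustrative only, not driver code, but it shows why the busy/idle transitions now happen exactly once per busy period even if callers invoke them repeatedly:

/* Userspace model of the mm.busy guard added above. */
#include <stdbool.h>
#include <stdio.h>

static bool busy;

static void mark_busy(void)
{
	if (busy)
		return;		/* already marked busy: nothing to do */
	busy = true;
	printf("power up: leave the idle power state\n");
}

static void mark_idle(void)
{
	if (!busy)
		return;		/* never marked busy: don't touch disabled hardware */
	busy = false;
	printf("power down: hardware and display clocks\n");
}

int main(void)
{
	mark_idle();		/* spurious idle before any work: ignored */
	mark_busy();		/* first request: one "power up" */
	mark_busy();		/* further requests: no-ops */
	mark_idle();		/* idle timer fires: one "power down" */
	mark_idle();		/* duplicate idle: ignored */
	return 0;
}

Because both functions bail out early when the flag already has the right value, intel_mark_idle() can be driven from a timer that may fire redundantly without ever touching registers that were already powered down.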