drm/i915: remove do_retire from i915_wait_request
This originates from a hack by me to quickly fix a bug in an earlier patch, where we needed control over whether or not waiting on a seqno actually did any retire list processing. Since the two operations aren't clearly related, we should pull the parameter out of the wait function and make the caller responsible for retiring if the action is desired. The only call site which (on purpose) did not get an explicit retire_requests call is i915_gem_inactive_shrink(); that code was already calling retire_requests a second time.

v2: don't modify any behavior except i915_gem_inactive_shrink() (Daniel)

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 507432986c
commit b2da9fe5d5
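In effect, call sites that previously passed do_retire=true now pair the wait with an explicit retire call, as the hunks below show. The following is a minimal sketch of that caller-side pattern, not code from the patch itself; example_wait_and_retire is a hypothetical helper name, and it assumes struct_mutex is already held by the caller (as it is at the call sites touched here).

/* Sketch only: illustrates the caller-side pattern after this patch. */
static int example_wait_and_retire(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	/* i915_wait_request() no longer takes a do_retire flag ... */
	ret = i915_wait_request(ring, seqno);
	if (ret)
		return ret;

	/* ... so a caller that wants retire list processing does it explicitly. */
	i915_gem_retire_requests_ring(ring);
	return 0;
}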
drivers/gpu/drm/i915/i915_dma.c

@@ -2009,9 +2009,10 @@ int i915_driver_unload(struct drm_device *dev)
 	unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
 	mutex_lock(&dev->struct_mutex);
-	ret = i915_gpu_idle(dev, true);
+	ret = i915_gpu_idle(dev);
 	if (ret)
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
+	i915_gem_retire_requests(dev);
 	mutex_unlock(&dev->struct_mutex);
 
 	/* Cancel the retire work handler, which should be idle now. */

drivers/gpu/drm/i915/i915_drv.h

@@ -1297,14 +1297,13 @@ int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_init_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
+int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
 int __must_check i915_add_request(struct intel_ring_buffer *ring,
				  struct drm_file *file,
				  struct drm_i915_gem_request *request);
 int __must_check i915_wait_request(struct intel_ring_buffer *ring,
-				   uint32_t seqno,
-				   bool do_retire);
+				   uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,

drivers/gpu/drm/i915/i915_gem.c

@@ -1825,8 +1825,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
  */
 int
 i915_wait_request(struct intel_ring_buffer *ring,
-		  uint32_t seqno,
-		  bool do_retire)
+		  uint32_t seqno)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	u32 ier;

@@ -1902,14 +1901,6 @@ i915_wait_request(struct intel_ring_buffer *ring,
 	if (atomic_read(&dev_priv->mm.wedged))
 		ret = -EAGAIN;
 
-	/* Directly dispatch request retiring. While we have the work queue
-	 * to handle this, the waiter on a request often wants an associated
-	 * buffer to have made it to the inactive list, and we would need
-	 * a separate wait queue to handle that.
-	 */
-	if (ret == 0 && do_retire)
-		i915_gem_retire_requests_ring(ring);
-
 	return ret;
 }
 

@@ -1931,10 +1922,10 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 	 * it.
 	 */
 	if (obj->active) {
-		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
-					true);
+		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
 		if (ret)
 			return ret;
+		i915_gem_retire_requests_ring(obj->ring);
 	}
 
 	return 0;

@@ -2117,7 +2108,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
 	return 0;
 }
 
-static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
+static int i915_ring_idle(struct intel_ring_buffer *ring)
 {
 	int ret;
 

@@ -2131,18 +2122,17 @@ static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
 		return ret;
 	}
 
-	return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
-				 do_retire);
+	return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
 }
 
-int i915_gpu_idle(struct drm_device *dev, bool do_retire)
+int i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret, i;
 
 	/* Flush everything onto the inactive list. */
 	for (i = 0; i < I915_NUM_RINGS; i++) {
-		ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
+		ret = i915_ring_idle(&dev_priv->ring[i]);
 		if (ret)
 			return ret;
 	}

@@ -2331,9 +2321,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
 	}
 
 	if (obj->last_fenced_seqno) {
-		ret = i915_wait_request(obj->ring,
-					obj->last_fenced_seqno,
-					false);
+		ret = i915_wait_request(obj->ring, obj->last_fenced_seqno);
 		if (ret)
 			return ret;
 

@@ -3394,11 +3382,12 @@ i915_gem_idle(struct drm_device *dev)
 		return 0;
 	}
 
-	ret = i915_gpu_idle(dev, true);
+	ret = i915_gpu_idle(dev);
 	if (ret) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
+	i915_gem_retire_requests(dev);
 
 	/* Under UMS, be paranoid and evict. */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))

@@ -4025,7 +4014,7 @@ rescan:
 		 * This has a dramatic impact to reduce the number of
 		 * OOM-killer events whilst running the GPU aggressively.
 		 */
-		if (i915_gpu_idle(dev, true) == 0)
+		if (i915_gpu_idle(dev) == 0)
 			goto rescan;
 	}
 	mutex_unlock(&dev->struct_mutex);

drivers/gpu/drm/i915/i915_gem_evict.c

@@ -168,7 +168,7 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj, *next;
 	bool lists_empty;
-	int ret;
+	int ret,i;
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&

@@ -178,11 +178,20 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 
 	trace_i915_gem_evict_everything(dev, purgeable_only);
 
-	/* Flush everything (on to the inactive lists) and evict */
-	ret = i915_gpu_idle(dev, true);
+	ret = i915_gpu_idle(dev);
 	if (ret)
 		return ret;
 
+	/* The gpu_idle will flush everything in the write domain to the
+	 * active list. Then we must move everything off the active list
+	 * with retire requests.
+	 */
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		if (WARN_ON(!list_empty(&dev_priv->ring[i].gpu_write_list)))
+			return -EBUSY;
+
+	i915_gem_retire_requests(dev);
+
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 
 	/* Having flushed everything, unbind() should never raise an error */

drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -1220,9 +1220,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		 * so every billion or so execbuffers, we need to stall
 		 * the GPU in order to reset the counters.
 		 */
-		ret = i915_gpu_idle(dev, true);
+		ret = i915_gpu_idle(dev);
 		if (ret)
 			goto err;
+		i915_gem_retire_requests(dev);
 
 		BUG_ON(ring->sync_seqno[i]);
 	}

drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -317,7 +317,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
 
 	if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
 		dev_priv->mm.interruptible = false;
-		if (i915_gpu_idle(dev_priv->dev, false)) {
+		if (i915_gpu_idle(dev_priv->dev)) {
 			DRM_ERROR("Couldn't idle GPU\n");
 			/* Wait a bit, in hopes it avoids the hang */
 			udelay(10);

drivers/gpu/drm/i915/intel_overlay.c

@@ -225,10 +225,10 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 	}
 	overlay->last_flip_req = request->seqno;
 	overlay->flip_tail = tail;
-	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
-				true);
+	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
 	if (ret)
 		return ret;
+	i915_gem_retire_requests(dev);
 
 	overlay->last_flip_req = 0;
 	return 0;

@@ -447,10 +447,10 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 	if (overlay->last_flip_req == 0)
 		return 0;
 
-	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
-				true);
+	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
 	if (ret)
 		return ret;
+	i915_gem_retire_requests(dev);
 
 	if (overlay->flip_tail)
 		overlay->flip_tail(overlay);

drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -1088,9 +1088,11 @@ static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
 	was_interruptible = dev_priv->mm.interruptible;
 	dev_priv->mm.interruptible = false;
 
-	ret = i915_wait_request(ring, seqno, true);
+	ret = i915_wait_request(ring, seqno);
 
 	dev_priv->mm.interruptible = was_interruptible;
+	if (!ret)
+		i915_gem_retire_requests_ring(ring);
 
 	return ret;
 }