drm/i915: Split idling from forcing context switch
We only need to force a switch to the kernel context placeholder during
eviction. All other uses of i915_gpu_idle() just want to wait until
existing work on the GPU is idle. Rename i915_gpu_idle() to
i915_gem_wait_for_idle() to avoid any implications about "parking" the
context first.

v2: Tweak an error message if the wait fails for the ilk vtd w/a

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1466776558-21516-6-git-send-email-chris@chris-wilson.co.uk
commit 6e5a5beb8e (parent ec602a0dfb)
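Below is a condensed sketch (not part of the patch) of the idiom the eviction
paths adopt after this split: first park the engines on the kernel context,
then wait for idle and retire. The wrapper name evict_idle_sketch is
hypothetical; switch_to_pinned_context(), i915_gem_wait_for_idle() and
i915_gem_retire_requests() are the functions touched by the hunks below.

	/* Hypothetical wrapper for illustration only; assumes struct_mutex
	 * is already held, as i915_gem_wait_for_idle() now asserts. */
	static int evict_idle_sketch(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = to_i915(dev);
		int ret;

		/* Eviction only: queue a switch to the kernel context on
		 * each legacy ring so no user context remains pinned
		 * (a no-op under execlists). */
		ret = switch_to_pinned_context(dev_priv);
		if (ret)
			return ret;

		/* All other callers just want this part: wait for
		 * outstanding work on the GPU to complete. */
		ret = i915_gem_wait_for_idle(dev_priv);
		if (ret)
			return ret;

		i915_gem_retire_requests(dev_priv);
		return 0;
	}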
@@ -4947,7 +4947,7 @@ i915_drop_caches_set(void *data, u64 val)
 		return ret;
 
 	if (val & DROP_ACTIVE) {
-		ret = i915_gpu_idle(dev);
+		ret = i915_gem_wait_for_idle(dev_priv);
 		if (ret)
 			goto unlock;
 	}
 
@@ -3336,7 +3336,7 @@ int i915_gem_init_engines(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_engines(struct drm_device *dev);
-int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct drm_i915_gem_request *req,
 			struct drm_i915_gem_object *batch_obj,
@@ -3671,29 +3671,17 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma)
 	return __i915_vma_unbind(vma, false);
 }
 
-int i915_gpu_idle(struct drm_device *dev)
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
 	int ret;
 
+	lockdep_assert_held(&dev_priv->dev->struct_mutex);
+
 	for_each_engine(engine, dev_priv) {
 		if (engine->last_context == NULL)
 			continue;
 
-		if (!i915.enable_execlists) {
-			struct drm_i915_gem_request *req;
-
-			req = i915_gem_request_alloc(engine, NULL);
-			if (IS_ERR(req))
-				return PTR_ERR(req);
-
-			ret = i915_switch_context(req);
-			i915_add_request_no_flush(req);
-			if (ret)
-				return ret;
-		}
-
 		ret = intel_engine_idle(engine);
 		if (ret)
 			return ret;
@@ -4963,7 +4951,7 @@ i915_gem_suspend(struct drm_device *dev)
 	int ret = 0;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = i915_gpu_idle(dev);
+	ret = i915_gem_wait_for_idle(dev_priv);
 	if (ret)
 		goto err;
 
@@ -33,6 +33,37 @@
 #include "intel_drv.h"
 #include "i915_trace.h"
 
+static int switch_to_pinned_context(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+
+	if (i915.enable_execlists)
+		return 0;
+
+	for_each_engine(engine, dev_priv) {
+		struct drm_i915_gem_request *req;
+		int ret;
+
+		if (engine->last_context == NULL)
+			continue;
+
+		if (engine->last_context == dev_priv->kernel_context)
+			continue;
+
+		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
+
+		ret = i915_switch_context(req);
+		i915_add_request_no_flush(req);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+
 static bool
 mark_free(struct i915_vma *vma, struct list_head *unwind)
 {
@@ -150,11 +181,17 @@ none:
 
 	/* Only idle the GPU and repeat the search once */
 	if (pass++ == 0) {
-		ret = i915_gpu_idle(dev);
+		struct drm_i915_private *dev_priv = to_i915(dev);
+
+		ret = switch_to_pinned_context(dev_priv);
 		if (ret)
 			return ret;
 
-		i915_gem_retire_requests(to_i915(dev));
+		ret = i915_gem_wait_for_idle(dev_priv);
+		if (ret)
+			return ret;
+
+		i915_gem_retire_requests(dev_priv);
 		goto search_again;
 	}
 
@@ -261,11 +298,17 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 	trace_i915_gem_evict_vm(vm);
 
 	if (do_idle) {
-		ret = i915_gpu_idle(vm->dev);
+		struct drm_i915_private *dev_priv = to_i915(vm->dev);
+
+		ret = switch_to_pinned_context(dev_priv);
 		if (ret)
 			return ret;
 
-		i915_gem_retire_requests(to_i915(vm->dev));
+		ret = i915_gem_wait_for_idle(dev_priv);
+		if (ret)
+			return ret;
+
+		i915_gem_retire_requests(dev_priv);
 
 		WARN_ON(!list_empty(&vm->active_list));
 	}
@@ -2261,8 +2261,8 @@ static bool do_idling(struct drm_i915_private *dev_priv)
 
 	if (unlikely(ggtt->do_idle_maps)) {
 		dev_priv->mm.interruptible = false;
-		if (i915_gpu_idle(dev_priv->dev)) {
-			DRM_ERROR("Couldn't idle GPU\n");
+		if (i915_gem_wait_for_idle(dev_priv)) {
+			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
 			/* Wait a bit, in hopes it avoids the hang */
 			udelay(10);
 		}
@@ -408,7 +408,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 		return NOTIFY_DONE;
 
 	/* Force everything onto the inactive lists */
-	ret = i915_gpu_idle(dev_priv->dev);
+	ret = i915_gem_wait_for_idle(dev_priv);
 	if (ret)
 		goto out;
 