drm/i915: Move releasing of the GEM request from free to retire/cancel

If we move the release of the GEM request (i.e. decoupling it from the
various lists used for client and context tracking) to the point at
which it is complete (either because the GPU has retired the request,
or because the caller has cancelled it), we can remove the requirement
that the final unreference of the GEM request be performed under the
struct_mutex.

The careful reader may notice that one or two impossible NULL pointer
tests are dropped for readability. These pointers cannot be NULL since
they are assigned during request construction and never unset.
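
In short, the final-unreference contract changes as follows (a minimal
before/after sketch, not the full helpers; see the i915_drv.h hunk
below):

	/* Before: dropping the last reference outside struct_mutex had
	 * to take the lock, because i915_gem_request_free() still
	 * unlinked the request from mutex-protected lists:
	 */
	if (kref_put_mutex(&req->ref, i915_gem_request_free,
			   &dev->struct_mutex))
		mutex_unlock(&dev->struct_mutex);

	/* After: retire/cancel (which already run under struct_mutex)
	 * do the unlinking, so the final put is safe from any context:
	 */
	kref_put(&req->ref, i915_gem_request_free);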

v2,v3: Rebalance execlists by moving the context unpinning.
v4: Rebase onto -nightly
v5: Avoid trying to rebalance execlist/GuC context pinning; leave that
to the next step

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1461833819-3991-21-git-send-email-chris@chris-wilson.co.uk
commit 73db04cfa8 (parent 978f1e09ab)
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2016-04-28 09:56:55 +01:00

4 changed files with 11 additions and 30 deletions

drivers/gpu/drm/i915/i915_drv.h

@@ -2373,23 +2373,9 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
 static inline void
 i915_gem_request_unreference(struct drm_i915_gem_request *req)
 {
-	WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex));
 	kref_put(&req->ref, i915_gem_request_free);
 }
 
-static inline void
-i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
-{
-	struct drm_device *dev;
-
-	if (!req)
-		return;
-
-	dev = req->engine->dev;
-	if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
-		mutex_unlock(&dev->struct_mutex);
-}
-
 static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
 					   struct drm_i915_gem_request *src)
 {
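
With the __unlocked variant gone, callers no longer need to pick a
helper based on whether they hold struct_mutex. For illustration only
(mirroring the wait ioctl and worker call sites below), a waiter that
never takes the lock can now simply do:

	/* wait for completion, then drop the reference lock-free */
	__i915_wait_request(req, true, NULL, NULL);
	i915_gem_request_unreference(req);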

drivers/gpu/drm/i915/i915_gem.c

@@ -1413,6 +1413,13 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	list_del_init(&request->list);
 	i915_gem_request_remove_from_client(request);
 
+	if (request->ctx) {
+		if (i915.enable_execlists)
+			intel_lr_context_unpin(request->ctx, request->engine);
+
+		i915_gem_context_unreference(request->ctx);
+	}
+
 	i915_gem_request_unreference(request);
 }
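
The context unpin/unreference is safe here because retirement only ever
happens with struct_mutex held; a hedged sketch of the invariant this
relies on (not part of the patch):

	/* every path into i915_gem_request_retire() holds struct_mutex,
	 * so the unpin/unreference above is fully serialized:
	 */
	lockdep_assert_held(&request->engine->dev->struct_mutex);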
@@ -2716,18 +2723,6 @@ void i915_gem_request_free(struct kref *req_ref)
 {
 	struct drm_i915_gem_request *req = container_of(req_ref,
 						 typeof(*req), ref);
-	struct intel_context *ctx = req->ctx;
-
-	if (req->file_priv)
-		i915_gem_request_remove_from_client(req);
-
-	if (ctx) {
-		if (i915.enable_execlists)
-			intel_lr_context_unpin(ctx, req->engine);
-
-		i915_gem_context_unreference(ctx);
-	}
-
 	kmem_cache_free(req->i915->requests, req);
 }
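
After the move, the free callback reduces to returning the request to
its slab cache and touches no mutex-protected state; reconstructed from
the lines kept above, the resulting function is just:

	void i915_gem_request_free(struct kref *req_ref)
	{
		struct drm_i915_gem_request *req = container_of(req_ref,
							 typeof(*req), ref);

		kmem_cache_free(req->i915->requests, req);
	}

This is what makes the plain kref_put() safe without struct_mutex.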
@@ -3176,7 +3171,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		ret = __i915_wait_request(req[i], true,
 					  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
 					  to_rps_client(file));
-		i915_gem_request_unreference__unlocked(req[i]);
+		i915_gem_request_unreference(req[i]);
 	}
 
 	return ret;
@@ -4202,7 +4197,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
-	i915_gem_request_unreference__unlocked(target);
+	i915_gem_request_unreference(target);
 
 	return ret;
 }

drivers/gpu/drm/i915/intel_display.c

@@ -11400,7 +11400,7 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
 		WARN_ON(__i915_wait_request(mmio_flip->req,
 					    false, NULL,
 					    &mmio_flip->i915->rps.mmioflips));
-		i915_gem_request_unreference__unlocked(mmio_flip->req);
+		i915_gem_request_unreference(mmio_flip->req);
 	}
 
 	/* For framebuffer backed by dmabuf, wait for fence */

drivers/gpu/drm/i915/intel_pm.c

@@ -7385,7 +7385,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
 		gen6_rps_boost(to_i915(req->engine->dev), NULL,
 			       req->emitted_jiffies);
 
-	i915_gem_request_unreference__unlocked(req);
+	i915_gem_request_unreference(req);
 
 	kfree(boost);
 }