drm/i915/gem: Reduce ctx->engine_mutex for reading the clone source

When cloning the engines from the source context, we need to ensure that
the engines are not freed as we copy them, and that the flags we clone
from the source correspond with the engines we copy across. To do this
we need only take a reference to the src->engines, rather than hold the
src->engine_mutex, so long as we verify that nothing changed under the
read.
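
For reference, __context_engines_await() pins a snapshot of the engines with an
acquire-and-recheck loop along the following lines (a sketch: only the lines this
patch touches appear verbatim in the hunks below, the remainder is reconstructed
from context and may differ in detail from the tree):

        struct i915_gem_engines *engines;

        rcu_read_lock();
        do {
                /* Snapshot the current engine map under RCU */
                engines = rcu_dereference(ctx->engines);
                GEM_BUG_ON(!engines);

                /* Read the flags from the same snapshot we are about to pin */
                if (user_engines)
                        *user_engines = i915_gem_context_user_engines(ctx);

                /* successful await => strong mb */
                if (unlikely(!i915_sw_fence_await(&engines->fence)))
                        continue;

                /* Retry if a concurrent set-engines replaced ctx->engines */
                if (likely(engines == rcu_access_pointer(ctx->engines)))
                        break;

                i915_sw_fence_complete(&engines->fence);
        } while (1);
        rcu_read_unlock();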

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210114135612.13210-3-chris@chris-wilson.co.uk
commit 1a72e7414d (parent b2fe00bbb2)
Chris Wilson, 2021-01-14 13:56:10 +00:00
1 file changed, 15 additions and 9 deletions

--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -717,7 +717,8 @@ err_free:
 }
 
 static inline struct i915_gem_engines *
-__context_engines_await(const struct i915_gem_context *ctx)
+__context_engines_await(const struct i915_gem_context *ctx,
+                        bool *user_engines)
 {
         struct i915_gem_engines *engines;
 
@@ -726,6 +727,10 @@ __context_engines_await(const struct i915_gem_context *ctx)
                 engines = rcu_dereference(ctx->engines);
                 GEM_BUG_ON(!engines);
 
+                if (user_engines)
+                        *user_engines = i915_gem_context_user_engines(ctx);
+
+                /* successful await => strong mb */
                 if (unlikely(!i915_sw_fence_await(&engines->fence)))
                         continue;
 
@@ -749,7 +754,7 @@ context_apply_all(struct i915_gem_context *ctx,
         struct intel_context *ce;
         int err = 0;
 
-        e = __context_engines_await(ctx);
+        e = __context_engines_await(ctx, NULL);
         for_each_gem_engine(ce, e, it) {
                 err = fn(ce, data);
                 if (err)
@@ -1075,7 +1080,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
                 return err;
         }
 
-        e = __context_engines_await(ctx);
+        e = __context_engines_await(ctx, NULL);
         if (!e) {
                 i915_active_release(&cb->base);
                 return -ENOENT;
@@ -2095,11 +2100,14 @@ static int copy_ring_size(struct intel_context *dst,
 static int clone_engines(struct i915_gem_context *dst,
                          struct i915_gem_context *src)
 {
-        struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
-        struct i915_gem_engines *clone;
+        struct i915_gem_engines *clone, *e;
         bool user_engines;
         unsigned long n;
 
+        e = __context_engines_await(src, &user_engines);
+        if (!e)
+                return -ENOENT;
+
         clone = alloc_engines(e->num_engines);
         if (!clone)
                 goto err_unlock;
@@ -2141,9 +2149,7 @@ static int clone_engines(struct i915_gem_context *dst,
                 }
         }
         clone->num_engines = n;
-
-        user_engines = i915_gem_context_user_engines(src);
-        i915_gem_context_unlock_engines(src);
+        i915_sw_fence_complete(&e->fence);
 
         /* Serialised by constructor */
         engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
@@ -2154,7 +2160,7 @@ static int clone_engines(struct i915_gem_context *dst,
         return 0;
 
 err_unlock:
-        i915_gem_context_unlock_engines(src);
+        i915_sw_fence_complete(&e->fence);
         return -ENOMEM;
 }
 
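
From the caller's side, the src->engine_mutex critical section is replaced by an
await/complete pair around the copy; condensed (not a verbatim quote of
clone_engines(), error paths trimmed):

        bool user_engines;
        struct i915_gem_engines *e;

        e = __context_engines_await(src, &user_engines);
        if (!e)
                return -ENOENT;

        /* ... copy e->engines[] into clone, transfer user_engines to dst ... */

        i915_sw_fence_complete(&e->fence);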