drm/i915: Push the ring creation flags to the backend

Push the ring creation flags from the outer GEM context to the inner
intel_context to avoid an unsightly back-reference from inside the
backend.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190809182518.20486-3-chris@chris-wilson.co.uk
parent 4c60b1aaa2
commit 48ae397b6b
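
[ Illustrative aside, not part of the commit: the patch smuggles the
requested ring size through the ce->ring pointer itself. Until the
backend allocates the real ring, __intel_context_ring_size() encodes
the size as a fake struct intel_ring pointer via u64_to_ptr(), and the
backend recovers it with a plain cast, e.g. (unsigned long)ce->ring in
__execlists_context_alloc(). A minimal standalone C sketch of the idea
follows; ring_size_to_ptr()/ring_ptr_to_size() are hypothetical names
for illustration, not kernel API. ]

#include <stdio.h>
#include <stdint.h>

struct intel_ring;	/* opaque: never dereferenced while it only encodes a size */

/* Encode a byte count as a placeholder ring pointer (cf. __intel_context_ring_size()). */
static inline struct intel_ring *ring_size_to_ptr(uint64_t sz)
{
	return (struct intel_ring *)(uintptr_t)sz;
}

/* Recover the size, as the backend does with (unsigned long)ce->ring. */
static inline unsigned long ring_ptr_to_size(const struct intel_ring *ring)
{
	return (unsigned long)(uintptr_t)ring;
}

int main(void)
{
	/* 16 KiB is the default set in intel_context_init() (SZ_16K). */
	struct intel_ring *ring = ring_size_to_ptr(16 * 1024);

	printf("requested ring size: %lu bytes\n", ring_ptr_to_size(ring));
	return 0;
}
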
@@ -436,8 +436,6 @@ __create_context(struct drm_i915_private *i915)
 	i915_gem_context_set_bannable(ctx);
 	i915_gem_context_set_recoverable(ctx);
 
-	ctx->ring_size = 4 * PAGE_SIZE;
-
 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
 
@@ -448,22 +446,34 @@ err_free:
 	return ERR_PTR(err);
 }
 
+static void
+context_apply_all(struct i915_gem_context *ctx,
+		  void (*fn)(struct intel_context *ce, void *data),
+		  void *data)
+{
+	struct i915_gem_engines_iter it;
+	struct intel_context *ce;
+
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
+		fn(ce, data);
+	i915_gem_context_unlock_engines(ctx);
+}
+
+static void __apply_ppgtt(struct intel_context *ce, void *vm)
+{
+	i915_vm_put(ce->vm);
+	ce->vm = i915_vm_get(vm);
+}
+
 static struct i915_address_space *
 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
 {
 	struct i915_address_space *old = ctx->vm;
-	struct i915_gem_engines_iter it;
-	struct intel_context *ce;
 
 	GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
 
 	ctx->vm = i915_vm_get(vm);
-
-	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
-		i915_vm_put(ce->vm);
-		ce->vm = i915_vm_get(vm);
-	}
-	i915_gem_context_unlock_engines(ctx);
+	context_apply_all(ctx, __apply_ppgtt, vm);
 
 	return old;
 }
@@ -560,7 +570,6 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 
 	i915_gem_context_clear_bannable(ctx);
 	ctx->sched.priority = I915_USER_PRIORITY(prio);
-	ctx->ring_size = PAGE_SIZE;
 
 	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
 
@@ -1544,6 +1553,7 @@ set_engines(struct i915_gem_context *ctx,
 	for (n = 0; n < num_engines; n++) {
 		struct i915_engine_class_instance ci;
 		struct intel_engine_cs *engine;
+		struct intel_context *ce;
 
 		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
 			__free_engines(set.engines, n);
@@ -1566,11 +1576,13 @@ set_engines(struct i915_gem_context *ctx,
 			return -ENOENT;
 		}
 
-		set.engines->engines[n] = intel_context_create(ctx, engine);
-		if (!set.engines->engines[n]) {
+		ce = intel_context_create(ctx, engine);
+		if (IS_ERR(ce)) {
 			__free_engines(set.engines, n);
-			return -ENOMEM;
+			return PTR_ERR(ce);
 		}
+
+		set.engines->engines[n] = ce;
 	}
 	set.engines->num_engines = num_engines;
 
@@ -169,9 +169,6 @@ struct i915_gem_context {
 
 	struct i915_sched_attr sched;
 
-	/** ring_size: size for allocating the per-engine ring buffer */
-	u32 ring_size;
-
 	/** guilty_count: How many times this context has caused a GPU hang. */
 	atomic_t guilty_count;
 	/**
@@ -222,6 +222,7 @@ intel_context_init(struct intel_context *ce,
 	ce->engine = engine;
 	ce->ops = engine->cops;
 	ce->sseu = engine->sseu;
+	ce->ring = __intel_context_ring_size(SZ_16K);
 
 	INIT_LIST_HEAD(&ce->signal_link);
 	INIT_LIST_HEAD(&ce->signals);
@@ -136,4 +136,9 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
 
 struct i915_request *intel_context_create_request(struct intel_context *ce);
 
+static inline struct intel_ring *__intel_context_ring_size(u64 sz)
+{
+	return u64_to_ptr(struct intel_ring, sz);
+}
+
 #endif /* __INTEL_CONTEXT_H__ */
@@ -738,6 +738,8 @@ create_kernel_context(struct intel_engine_cs *engine)
 	if (IS_ERR(ce))
 		return ce;
 
+	ce->ring = __intel_context_ring_size(SZ_4K);
+
 	err = intel_context_pin(ce);
 	if (err) {
 		intel_context_put(ce);
@@ -3115,9 +3115,8 @@ static int __execlists_context_alloc(struct intel_context *ce,
 		goto error_deref_obj;
 	}
 
-	ring = intel_engine_create_ring(engine,
-					timeline,
-					ce->gem_context->ring_size);
+	ring = intel_engine_create_ring(engine, timeline,
+					(unsigned long)ce->ring);
 	intel_timeline_put(timeline);
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
@@ -2350,7 +2350,7 @@ int intel_ring_submission_init(struct intel_engine_cs *engine)
 	}
 	GEM_BUG_ON(timeline->has_initial_breadcrumb);
 
-	ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
+	ring = intel_engine_create_ring(engine, timeline, SZ_16K);
 	intel_timeline_put(timeline);
 	if (IS_ERR(ring)) {
 		err = PTR_ERR(ring);
@@ -140,7 +140,7 @@ static void mock_context_destroy(struct kref *ref)
 
 	GEM_BUG_ON(intel_context_is_pinned(ce));
 
-	if (ce->ring)
+	if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
 		mock_ring_free(ce->ring);
 
 	intel_context_fini(ce);
@@ -1227,8 +1227,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 	}
 
 	i915_gem_context_set_force_single_submission(ctx);
-	if (!USES_GUC_SUBMISSION(i915))
-		ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
 
 	i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm));
 
@@ -1244,6 +1242,12 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 			goto out_shadow_ctx;
 		}
 
+		if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
+			const unsigned int ring_size = 512 * SZ_4K;
+
+			ce->ring = __intel_context_ring_size(ring_size);
+		}
+
 		ret = intel_context_pin(ce);
 		intel_context_put(ce);
 		if (ret)
@@ -324,10 +324,14 @@ static void print_context_stats(struct seq_file *m,
 
 		for_each_gem_engine(ce,
 				    i915_gem_context_lock_engines(ctx), it) {
-			if (ce->state)
-				per_file_stats(0, ce->state->obj, &kstats);
-			if (ce->ring)
+			intel_context_lock_pinned(ce);
+			if (intel_context_is_pinned(ce)) {
+				if (ce->state)
+					per_file_stats(0,
+						       ce->state->obj, &kstats);
 				per_file_stats(0, ce->ring->vma->obj, &kstats);
+			}
+			intel_context_unlock_pinned(ce);
 		}
 		i915_gem_context_unlock_engines(ctx);
 
@@ -1627,12 +1631,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
 		for_each_gem_engine(ce,
 				    i915_gem_context_lock_engines(ctx), it) {
-			seq_printf(m, "%s: ", ce->engine->name);
-			if (ce->state)
-				describe_obj(m, ce->state->obj);
-			if (ce->ring)
+			intel_context_lock_pinned(ce);
+			if (intel_context_is_pinned(ce)) {
+				seq_printf(m, "%s: ", ce->engine->name);
+				if (ce->state)
+					describe_obj(m, ce->state->obj);
 				describe_ctx_ring(m, ce->ring);
-			seq_putc(m, '\n');
+				seq_putc(m, '\n');
+			}
+			intel_context_unlock_pinned(ce);
 		}
 		i915_gem_context_unlock_engines(ctx);
 