drm/i915: Split alloc from init for lrc
Extend the init/init_hw split to context init:
  - Move context initialisation into i915_gem_init_hw
  - Move one-off initialisation for the render ring to
    i915_gem_validate_context
  - Move default context initialisation to logical_ring_init

Rename intel_lr_context_deferred_create to
intel_lr_context_deferred_alloc, to reflect the reduced functionality
and the alloc/init split.

This patch splits out the allocation of resources from initialisation,
to allow easier reuse of code for resume/GPU reset.

v2: Removed function-pointer wrapping of do_switch_context (Daniel Vetter)
    Left ->init_context in intel_lr_context_deferred_alloc (Daniel Vetter)
    Removed unnecessary init flag & ring type test (Daniel Vetter)
    Improved commit message (Daniel Vetter)

v3: On init/reinit, set the hw next sequence number to the sw next
    sequence number, which is set to 1 at driver load time. This
    prevents the seqno being reset on reinit (Chris Wilson)

v4: Set seqno back to ~0 - 0x1000 at start-of-day, and increment by
    0x100 on reset. This makes it obvious which batch buffers are which
    after a reset (David Gordon & John Harrison)
    Rebase.

v5: Rebase. Fixed rebase breakage. Put context pinning in a separate
    function. Removed code churn. (Thomas Daniel)

v6: Cleaned up issues introduced in v2 & v5 (Thomas Daniel)

Issue: VIZ-4798
Signed-off-by: Nick Hoath <nicholas.hoath@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: John Harrison <john.c.harrison@intel.com>
Cc: David Gordon <david.s.gordon@intel.com>
Cc: Thomas Daniel <thomas.daniel@intel.com>
Reviewed-by: Thomas Daniel <thomas.daniel@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
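The seqno scheme described in v3/v4 can be seen in miniature below. This is an illustrative sketch, not driver code; the constants mirror the i915_gem.c hunks further down in this diff.

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch of the v4 seqno scheme (not driver code).
 * At load, next_seqno starts just below the u32 wrap point so that
 * wraparound bugs surface early; each re-init adds 0x100, leaving a
 * visible gap between pre- and post-reset batch buffers. */
int main(void)
{
	uint32_t next_seqno = (uint32_t)~0 - 0x1100;	/* as set in i915_gem_load() */

	printf("load:          0x%08x\n", next_seqno);
	next_seqno += 0x100;		/* bump applied in i915_gem_init_hw() */
	printf("first init_hw: 0x%08x\n", next_seqno);	/* == ~0 - 0x1000 */
	next_seqno += 0x100;		/* bumped again on re-init after a GPU reset */
	printf("after reset:   0x%08x\n", next_seqno);
	return 0;
}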
parent 87bcdd2e27
commit e84fe80337
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -890,7 +890,6 @@ struct intel_context {
 	} legacy_hw_ctx;
 
 	/* Execlists */
-	bool rcs_initialized;
 	struct {
 		struct drm_i915_gem_object *state;
 		struct intel_ringbuffer *ringbuf;
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4609,14 +4609,8 @@ int i915_gem_init_rings(struct drm_device *dev)
 		goto cleanup_vebox_ring;
 	}
 
-	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
-	if (ret)
-		goto cleanup_bsd2_ring;
-
 	return 0;
 
-cleanup_bsd2_ring:
-	intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
 cleanup_vebox_ring:
 	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
 cleanup_blt_ring:
@@ -4705,6 +4699,14 @@ i915_gem_init_hw(struct drm_device *dev)
 		}
 	}
 
+	/*
+	 * Increment the next seqno by 0x100 so we have a visible break
+	 * on re-initialisation
+	 */
+	ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
+	if (ret)
+		goto out;
+
 	/* Now it is safe to go back round and do everything else: */
 	for_each_ring(ring, dev_priv, i) {
 		struct drm_i915_gem_request *req;
@@ -4906,6 +4908,14 @@ i915_gem_load(struct drm_device *dev)
 		dev_priv->num_fence_regs =
 				I915_READ(vgtif_reg(avail_rs.fence_num));
 
+	/*
+	 * Set initial sequence number for requests.
+	 * Using this number allows the wraparound to happen early,
+	 * catching any obvious problems.
+	 */
+	dev_priv->next_seqno = ((u32)~0 - 0x1100);
+	dev_priv->last_seqno = ((u32)~0 - 0x1101);
+
 	/* Initialize fence registers to zero */
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	i915_gem_restore_fences(dev);
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1009,7 +1009,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 	}
 
 	if (i915.enable_execlists && !ctx->engine[ring->id].state) {
-		int ret = intel_lr_context_deferred_create(ctx, ring);
+		int ret = intel_lr_context_deferred_alloc(ctx, ring);
 		if (ret) {
 			DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
 			return ERR_PTR(ret);
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -221,6 +221,9 @@ enum {
 #define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x17
 
+static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
+static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
+		struct drm_i915_gem_object *default_ctx_obj);
 
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -1020,39 +1023,54 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
+static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
+		struct drm_i915_gem_object *ctx_obj,
+		struct intel_ringbuffer *ringbuf)
 {
-	struct drm_i915_private *dev_priv = rq->i915;
-	struct intel_engine_cs *ring = rq->ring;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = rq->ringbuf;
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = 0;
 
 	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-	if (rq->ctx->engine[ring->id].pin_count++ == 0) {
-		ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
-				PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
-		if (ret)
-			goto reset_pin_count;
+	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
+			PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+	if (ret)
+		return ret;
 
-		ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
-		if (ret)
-			goto unpin_ctx_obj;
+	ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
+	if (ret)
+		goto unpin_ctx_obj;
 
-		ctx_obj->dirty = true;
+	ctx_obj->dirty = true;
 
-		/* Invalidate GuC TLB. */
-		if (i915.enable_guc_submission)
-			I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
-	}
+	/* Invalidate GuC TLB. */
+	if (i915.enable_guc_submission)
+		I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
 
 	return ret;
 
 unpin_ctx_obj:
 	i915_gem_object_ggtt_unpin(ctx_obj);
+
+	return ret;
+}
+
+static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
+{
+	int ret = 0;
+	struct intel_engine_cs *ring = rq->ring;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = rq->ringbuf;
+
+	if (rq->ctx->engine[ring->id].pin_count++ == 0) {
+		ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf);
+		if (ret)
+			goto reset_pin_count;
+	}
+	return ret;
+
 reset_pin_count:
 	rq->ctx->engine[ring->id].pin_count = 0;
 
 	return ret;
 }
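The hunk above splits the old intel_lr_context_pin into a bare intel_lr_context_do_pin and a pin_count-managed wrapper. A minimal sketch of that wrapper pattern follows, with a hypothetical ctx_engine type standing in for the driver's per-engine context state (illustrative only, not driver code):

struct ctx_engine {
	int pin_count;
};

/* The real pinning work (GGTT pin, ringbuffer map, ...) goes here. */
static int do_pin(struct ctx_engine *ce)
{
	return 0;
}

static int pin(struct ctx_engine *ce)
{
	int ret = 0;

	/* Only the first user performs the real pin; later users just
	 * bump the count. A failed first pin undoes the optimistic
	 * increment so a retry starts from a clean state. */
	if (ce->pin_count++ == 0) {
		ret = do_pin(ce);
		if (ret)
			ce->pin_count = 0;
	}
	return ret;
}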
@@ -1462,6 +1480,9 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	lrc_setup_hardware_status_page(ring,
+				ring->default_context->engine[ring->id].state);
+
 	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
 	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -1901,7 +1922,21 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
 	if (ret)
 		return ret;
 
-	ret = intel_lr_context_deferred_create(ring->default_context, ring);
+	ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
+	if (ret)
+		return ret;
+
+	/* As this is the default context, always pin it */
+	ret = intel_lr_context_do_pin(
+			ring,
+			ring->default_context->engine[ring->id].state,
+			ring->default_context->engine[ring->id].ringbuf);
+	if (ret) {
+		DRM_ERROR(
+			"Failed to pin and map ringbuffer %s: %d\n",
+			ring->name, ret);
+		return ret;
+	}
 
 	return ret;
 }
@@ -2124,14 +2159,8 @@ int intel_logical_rings_init(struct drm_device *dev)
 		goto cleanup_vebox_ring;
 	}
 
-	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
-	if (ret)
-		goto cleanup_bsd2_ring;
-
 	return 0;
 
-cleanup_bsd2_ring:
-	intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
 cleanup_vebox_ring:
 	intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
 cleanup_blt_ring:
@@ -2401,7 +2430,7 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
 }
 
 /**
- * intel_lr_context_deferred_create() - create the LRC specific bits of a context
+ * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
  * @ctx: LR context to create.
  * @ring: engine to be used with the context.
  *
@@ -2413,12 +2442,11 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
  *
  * Return: non-zero on error.
  */
-int intel_lr_context_deferred_create(struct intel_context *ctx,
+int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 				    struct intel_engine_cs *ring)
 {
-	const bool is_global_default_ctx = (ctx == ring->default_context);
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *ctx_obj;
 	uint32_t context_size;
 	struct intel_ringbuffer *ringbuf;
@@ -2438,82 +2466,50 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 		return -ENOMEM;
 	}
 
-	if (is_global_default_ctx) {
-		ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
-				PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
-		if (ret) {
-			DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
-					ret);
-			drm_gem_object_unreference(&ctx_obj->base);
-			return ret;
-		}
-
-		/* Invalidate GuC TLB. */
-		if (i915.enable_guc_submission)
-			I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
-	}
-
 	ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE);
 	if (IS_ERR(ringbuf)) {
 		ret = PTR_ERR(ringbuf);
-		goto error_unpin_ctx;
-	}
-
-	if (is_global_default_ctx) {
-		ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
-		if (ret) {
-			DRM_ERROR(
-				"Failed to pin and map ringbuffer %s: %d\n",
-				ring->name, ret);
-			goto error_ringbuf;
-		}
+		goto error_deref_obj;
 	}
 
 	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
-		goto error;
+		goto error_ringbuf;
 	}
 
 	ctx->engine[ring->id].ringbuf = ringbuf;
 	ctx->engine[ring->id].state = ctx_obj;
 
-	if (ctx == ring->default_context)
-		lrc_setup_hardware_status_page(ring, ctx_obj);
-	else if (ring->id == RCS && !ctx->rcs_initialized) {
-		if (ring->init_context) {
-			struct drm_i915_gem_request *req;
+	if (ctx != ring->default_context && ring->init_context) {
+		struct drm_i915_gem_request *req;
 
-			ret = i915_gem_request_alloc(ring, ctx, &req);
-			if (ret)
-				return ret;
-
-			ret = ring->init_context(req);
-			if (ret) {
-				DRM_ERROR("ring init context: %d\n", ret);
-				i915_gem_request_cancel(req);
-				ctx->engine[ring->id].ringbuf = NULL;
-				ctx->engine[ring->id].state = NULL;
-				goto error;
-			}
-
-			i915_add_request_no_flush(req);
-		}
+		ret = i915_gem_request_alloc(ring,
+			ctx, &req);
+		if (ret) {
+			DRM_ERROR("ring create req: %d\n",
+				ret);
+			i915_gem_request_cancel(req);
+			goto error_ringbuf;
+		}
 
-		ctx->rcs_initialized = true;
+		ret = ring->init_context(req);
+		if (ret) {
+			DRM_ERROR("ring init context: %d\n",
+				ret);
+			i915_gem_request_cancel(req);
+			goto error_ringbuf;
+		}
+		i915_add_request_no_flush(req);
 	}
 
 	return 0;
 
-error:
-	if (is_global_default_ctx)
-		intel_unpin_ringbuffer_obj(ringbuf);
 error_ringbuf:
 	intel_ringbuffer_free(ringbuf);
-error_unpin_ctx:
-	if (is_global_default_ctx)
-		i915_gem_object_ggtt_unpin(ctx_obj);
+error_deref_obj:
 	drm_gem_object_unreference(&ctx_obj->base);
+	ctx->engine[ring->id].ringbuf = NULL;
+	ctx->engine[ring->id].state = NULL;
 	return ret;
 }
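Taken together, the intel_lrc.c hunks leave the following call flow. This is a summary inferred from the diff above, not text from the commit:

/*
 *   intel_lr_context_deferred_alloc()  - allocate context object and
 *                                        ringbuffer only; no pinning
 *   intel_lr_context_do_pin()          - bare pin; called directly from
 *                                        logical_ring_init() so the default
 *                                        context stays pinned
 *   intel_lr_context_pin()             - pin_count-managed wrapper used on
 *                                        the request-submission path
 *   gen8_init_common_ring()            - (re)writes the hardware status
 *                                        page on every init, so it is
 *                                        restored after a GPU reset
 */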
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -75,8 +75,8 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
 #define LRC_STATE_PN	(LRC_PPHWSP_PN + 1)
 
 void intel_lr_context_free(struct intel_context *ctx);
-int intel_lr_context_deferred_create(struct intel_context *ctx,
-				     struct intel_engine_cs *ring);
+int intel_lr_context_deferred_alloc(struct intel_context *ctx,
+				    struct intel_engine_cs *ring);
 void intel_lr_context_unpin(struct drm_i915_gem_request *req);
 void intel_lr_context_reset(struct drm_device *dev,
 			struct intel_context *ctx);