drm/i915: Rename intel_engine_cs struct members
below and a couple manual fixups.

@@
identifier I, J;
@@
struct I {
...
- struct intel_engine_cs *J;
+ struct intel_engine_cs *engine;
...
}

@@
identifier I, J;
@@
struct I {
...
- struct intel_engine_cs J;
+ struct intel_engine_cs engine;
...
}

@@
struct drm_i915_private *d;
@@
(
- d->ring
+ d->engine
)

@@
struct i915_execbuffer_params *p;
@@
(
- p->ring
+ p->engine
)

@@
struct intel_ringbuffer *r;
@@
(
- r->ring
+ r->engine
)

@@
struct drm_i915_gem_request *req;
@@
(
- req->ring
+ req->engine
)

v2: Script missed the tracepoint code - fixed up by hand.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
parent 0bc40be85f
commit 4a570db57c
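The semantic patch above matches on the member type rather than the member
name. As a minimal illustration (the struct below is hypothetical and not
part of this patch), the first rule renames any pointer member of type
struct intel_engine_cs to "engine", whatever it was called before:

/* Illustration only: a hypothetical struct, before the rewrite. */
struct intel_engine_cs;			/* opaque forward declaration */

struct example_state {
	struct intel_engine_cs *ring;	/* matched by type, not by name */
};

/* After running spatch the member becomes:
 *	struct intel_engine_cs *engine;
 * and the pointer rules rewrite accesses such as req->ring into
 * req->engine to match. */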
|
@ -984,7 +984,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
|
|||
const u32 *hws;
|
||||
int i;
|
||||
|
||||
engine = &dev_priv->ring[(uintptr_t)node->info_ent->data];
|
||||
engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
|
||||
hws = engine->status_page.page_addr;
|
||||
if (hws == NULL)
|
||||
return 0;
|
||||
|
|
|
@ -87,16 +87,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
|
|||
value = 1;
|
||||
break;
|
||||
case I915_PARAM_HAS_BSD:
|
||||
value = intel_ring_initialized(&dev_priv->ring[VCS]);
|
||||
value = intel_ring_initialized(&dev_priv->engine[VCS]);
|
||||
break;
|
||||
case I915_PARAM_HAS_BLT:
|
||||
value = intel_ring_initialized(&dev_priv->ring[BCS]);
|
||||
value = intel_ring_initialized(&dev_priv->engine[BCS]);
|
||||
break;
|
||||
case I915_PARAM_HAS_VEBOX:
|
||||
value = intel_ring_initialized(&dev_priv->ring[VECS]);
|
||||
value = intel_ring_initialized(&dev_priv->engine[VECS]);
|
||||
break;
|
||||
case I915_PARAM_HAS_BSD2:
|
||||
value = intel_ring_initialized(&dev_priv->ring[VCS2]);
|
||||
value = intel_ring_initialized(&dev_priv->engine[VCS2]);
|
||||
break;
|
||||
case I915_PARAM_HAS_RELAXED_FENCING:
|
||||
value = 1;
|
||||
|
|
|
@ -1652,7 +1652,7 @@ struct i915_execbuffer_params {
|
|||
uint32_t dispatch_flags;
|
||||
uint32_t args_batch_start_offset;
|
||||
uint64_t batch_obj_vm_offset;
|
||||
struct intel_engine_cs *ring;
|
||||
struct intel_engine_cs *engine;
|
||||
struct drm_i915_gem_object *batch_obj;
|
||||
struct intel_context *ctx;
|
||||
struct drm_i915_gem_request *request;
|
||||
|
@ -1704,7 +1704,7 @@ struct drm_i915_private {
|
|||
wait_queue_head_t gmbus_wait_queue;
|
||||
|
||||
struct pci_dev *bridge_dev;
|
||||
struct intel_engine_cs ring[I915_NUM_RINGS];
|
||||
struct intel_engine_cs engine[I915_NUM_RINGS];
|
||||
struct drm_i915_gem_object *semaphore_obj;
|
||||
uint32_t last_seqno, next_seqno;
|
||||
|
||||
|
@ -1969,7 +1969,7 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
|
|||
/* Iterate over initialised rings */
|
||||
#define for_each_ring(ring__, dev_priv__, i__) \
|
||||
for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
|
||||
for_each_if ((((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))))
|
||||
for_each_if ((((ring__) = &(dev_priv__)->engine[(i__)]), intel_ring_initialized((ring__))))
|
||||
|
||||
enum hdmi_force_audio {
|
||||
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
|
||||
|
@ -2184,7 +2184,7 @@ struct drm_i915_gem_request {
|
|||
|
||||
/** On Which ring this request was generated */
|
||||
struct drm_i915_private *i915;
|
||||
struct intel_engine_cs *ring;
|
||||
struct intel_engine_cs *engine;
|
||||
|
||||
/** GEM sequence number associated with the previous request,
|
||||
* when the HWS breadcrumb is equal to this the GPU is processing
|
||||
|
@ -2279,7 +2279,7 @@ i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
|
|||
static inline struct intel_engine_cs *
|
||||
i915_gem_request_get_ring(struct drm_i915_gem_request *req)
|
||||
{
|
||||
return req ? req->ring : NULL;
|
||||
return req ? req->engine : NULL;
|
||||
}
|
||||
|
||||
static inline struct drm_i915_gem_request *
|
||||
|
@ -2293,7 +2293,7 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
|
|||
static inline void
|
||||
i915_gem_request_unreference(struct drm_i915_gem_request *req)
|
||||
{
|
||||
WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
|
||||
WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex));
|
||||
kref_put(&req->ref, i915_gem_request_free);
|
||||
}
|
||||
|
||||
|
@ -2305,7 +2305,7 @@ i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
|
|||
if (!req)
|
||||
return;
|
||||
|
||||
dev = req->ring->dev;
|
||||
dev = req->engine->dev;
|
||||
if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
}
|
||||
|
@ -2949,14 +2949,14 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
|
|||
static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
|
||||
bool lazy_coherency)
|
||||
{
|
||||
u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
|
||||
u32 seqno = req->engine->get_seqno(req->engine, lazy_coherency);
|
||||
return i915_seqno_passed(seqno, req->previous_seqno);
|
||||
}
|
||||
|
||||
static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
|
||||
bool lazy_coherency)
|
||||
{
|
||||
u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
|
||||
u32 seqno = req->engine->get_seqno(req->engine, lazy_coherency);
|
||||
return i915_seqno_passed(seqno, req->seqno);
|
||||
}
|
||||
|
||||
|
|
|
@ -1193,7 +1193,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
|
|||
* takes to sleep on a request, on the order of a microsecond.
|
||||
*/
|
||||
|
||||
if (req->ring->irq_refcount)
|
||||
if (req->engine->irq_refcount)
|
||||
return -EBUSY;
|
||||
|
||||
/* Only spin if we know the GPU is processing this request */
|
||||
|
@ -1381,7 +1381,7 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
|
|||
if (req->file_priv)
|
||||
return -EINVAL;
|
||||
|
||||
dev_private = req->ring->dev->dev_private;
|
||||
dev_private = req->engine->dev->dev_private;
|
||||
file_priv = file->driver_priv;
|
||||
|
||||
spin_lock(&file_priv->mm.lock);
|
||||
|
@ -1434,7 +1434,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
|
|||
static void
|
||||
__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct drm_i915_gem_request *tmp;
|
||||
|
||||
lockdep_assert_held(&engine->dev->struct_mutex);
|
||||
|
@ -1466,7 +1466,7 @@ i915_wait_request(struct drm_i915_gem_request *req)
|
|||
|
||||
BUG_ON(req == NULL);
|
||||
|
||||
dev = req->ring->dev;
|
||||
dev = req->engine->dev;
|
||||
dev_priv = dev->dev_private;
|
||||
interruptible = dev_priv->mm.interruptible;
|
||||
|
||||
|
@ -1505,7 +1505,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
i = obj->last_write_req->ring->id;
|
||||
i = obj->last_write_req->engine->id;
|
||||
if (obj->last_read_req[i] == obj->last_write_req)
|
||||
i915_gem_object_retire__read(obj, i);
|
||||
else
|
||||
|
@ -1532,7 +1532,7 @@ static void
|
|||
i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
|
||||
struct drm_i915_gem_request *req)
|
||||
{
|
||||
int ring = req->ring->id;
|
||||
int ring = req->engine->id;
|
||||
|
||||
if (obj->last_read_req[ring] == req)
|
||||
i915_gem_object_retire__read(obj, ring);
|
||||
|
@ -2423,7 +2423,7 @@ static void
|
|||
i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
RQ_BUG_ON(obj->last_write_req == NULL);
|
||||
RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
|
||||
RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->engine)));
|
||||
|
||||
i915_gem_request_assign(&obj->last_write_req, NULL);
|
||||
intel_fb_obj_flush(obj, true, ORIGIN_CS);
|
||||
|
@ -2440,7 +2440,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
|
|||
list_del_init(&obj->ring_list[ring]);
|
||||
i915_gem_request_assign(&obj->last_read_req[ring], NULL);
|
||||
|
||||
if (obj->last_write_req && obj->last_write_req->ring->id == ring)
|
||||
if (obj->last_write_req && obj->last_write_req->engine->id == ring)
|
||||
i915_gem_object_retire__write(obj);
|
||||
|
||||
obj->active &= ~(1 << ring);
|
||||
|
@ -2551,7 +2551,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
|
|||
if (WARN_ON(request == NULL))
|
||||
return;
|
||||
|
||||
engine = request->ring;
|
||||
engine = request->engine;
|
||||
dev_priv = engine->dev->dev_private;
|
||||
ringbuf = request->ringbuf;
|
||||
|
||||
|
@ -2680,7 +2680,7 @@ void i915_gem_request_free(struct kref *req_ref)
|
|||
|
||||
if (ctx) {
|
||||
if (i915.enable_execlists && ctx != req->i915->kernel_context)
|
||||
intel_lr_context_unpin(ctx, req->ring);
|
||||
intel_lr_context_unpin(ctx, req->engine);
|
||||
|
||||
i915_gem_context_unreference(ctx);
|
||||
}
|
||||
|
@ -2712,7 +2712,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
|
|||
|
||||
kref_init(&req->ref);
|
||||
req->i915 = dev_priv;
|
||||
req->ring = engine;
|
||||
req->engine = engine;
|
||||
req->ctx = ctx;
|
||||
i915_gem_context_reference(req->ctx);
|
||||
|
||||
|
@ -4364,10 +4364,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
|
|||
|
||||
req = obj->last_read_req[i];
|
||||
if (req)
|
||||
args->busy |= 1 << (16 + req->ring->exec_id);
|
||||
args->busy |= 1 << (16 + req->engine->exec_id);
|
||||
}
|
||||
if (obj->last_write_req)
|
||||
args->busy |= obj->last_write_req->ring->exec_id;
|
||||
args->busy |= obj->last_write_req->engine->exec_id;
|
||||
}
|
||||
|
||||
unref:
|
||||
|
@ -4697,7 +4697,7 @@ err:
|
|||
|
||||
int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct drm_device *dev = engine->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
|
||||
|
@ -4814,13 +4814,13 @@ int i915_gem_init_rings(struct drm_device *dev)
|
|||
return 0;
|
||||
|
||||
cleanup_vebox_ring:
|
||||
intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
|
||||
intel_cleanup_ring_buffer(&dev_priv->engine[VECS]);
|
||||
cleanup_blt_ring:
|
||||
intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
|
||||
intel_cleanup_ring_buffer(&dev_priv->engine[BCS]);
|
||||
cleanup_bsd_ring:
|
||||
intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
|
||||
intel_cleanup_ring_buffer(&dev_priv->engine[VCS]);
|
||||
cleanup_render_ring:
|
||||
intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
|
||||
intel_cleanup_ring_buffer(&dev_priv->engine[RCS]);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -5056,7 +5056,7 @@ i915_gem_load_init(struct drm_device *dev)
|
|||
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
|
||||
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
|
||||
for (i = 0; i < I915_NUM_RINGS; i++)
|
||||
init_ring_lists(&dev_priv->ring[i]);
|
||||
init_ring_lists(&dev_priv->engine[i]);
|
||||
for (i = 0; i < I915_MAX_NUM_FENCES; i++)
|
||||
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
|
||||
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
|
||||
|
|
|
@ -346,7 +346,7 @@ void i915_gem_context_reset(struct drm_device *dev)
|
|||
}
|
||||
|
||||
for (i = 0; i < I915_NUM_RINGS; i++) {
|
||||
struct intel_engine_cs *engine = &dev_priv->ring[i];
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[i];
|
||||
|
||||
if (engine->last_context) {
|
||||
i915_gem_context_unpin(engine->last_context, engine);
|
||||
|
@ -421,13 +421,13 @@ void i915_gem_context_fini(struct drm_device *dev)
|
|||
* to default context. So we need to unreference the base object once
|
||||
* to offset the do_switch part, so that i915_gem_context_unreference()
|
||||
* can then free the base object correctly. */
|
||||
WARN_ON(!dev_priv->ring[RCS].last_context);
|
||||
WARN_ON(!dev_priv->engine[RCS].last_context);
|
||||
|
||||
i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
|
||||
}
|
||||
|
||||
for (i = I915_NUM_RINGS; --i >= 0;) {
|
||||
struct intel_engine_cs *engine = &dev_priv->ring[i];
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[i];
|
||||
|
||||
if (engine->last_context) {
|
||||
i915_gem_context_unpin(engine->last_context, engine);
|
||||
|
@ -441,7 +441,7 @@ void i915_gem_context_fini(struct drm_device *dev)
|
|||
|
||||
int i915_gem_context_enable(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
int ret;
|
||||
|
||||
if (i915.enable_execlists) {
|
||||
|
@ -510,7 +510,7 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
|
|||
static inline int
|
||||
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
u32 flags = hw_flags | MI_MM_SPACE_GTT;
|
||||
const int num_rings =
|
||||
/* Use an extended w/a on ivb+ if signalling from other rings */
|
||||
|
@ -625,7 +625,7 @@ needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
|
|||
if (INTEL_INFO(engine->dev)->gen < 8)
|
||||
return true;
|
||||
|
||||
if (engine != &dev_priv->ring[RCS])
|
||||
if (engine != &dev_priv->engine[RCS])
|
||||
return true;
|
||||
|
||||
return false;
|
||||
|
@ -643,7 +643,7 @@ needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to,
|
|||
if (!IS_GEN8(engine->dev))
|
||||
return false;
|
||||
|
||||
if (engine != &dev_priv->ring[RCS])
|
||||
if (engine != &dev_priv->engine[RCS])
|
||||
return false;
|
||||
|
||||
if (hw_flags & MI_RESTORE_INHIBIT)
|
||||
|
@ -655,14 +655,14 @@ needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to,
|
|||
static int do_switch(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_context *to = req->ctx;
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct drm_i915_private *dev_priv = engine->dev->dev_private;
|
||||
struct intel_context *from = engine->last_context;
|
||||
u32 hw_flags = 0;
|
||||
bool uninitialized = false;
|
||||
int ret, i;
|
||||
|
||||
if (from != NULL && engine == &dev_priv->ring[RCS]) {
|
||||
if (from != NULL && engine == &dev_priv->engine[RCS]) {
|
||||
BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
|
||||
BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
|
||||
}
|
||||
|
@ -671,7 +671,7 @@ static int do_switch(struct drm_i915_gem_request *req)
|
|||
return 0;
|
||||
|
||||
/* Trying to pin first makes error handling easier. */
|
||||
if (engine == &dev_priv->ring[RCS]) {
|
||||
if (engine == &dev_priv->engine[RCS]) {
|
||||
ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
|
||||
get_context_alignment(engine->dev),
|
||||
0);
|
||||
|
@ -700,7 +700,7 @@ static int do_switch(struct drm_i915_gem_request *req)
|
|||
to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
|
||||
}
|
||||
|
||||
if (engine != &dev_priv->ring[RCS]) {
|
||||
if (engine != &dev_priv->engine[RCS]) {
|
||||
if (from)
|
||||
i915_gem_context_unreference(from);
|
||||
goto done;
|
||||
|
@ -828,7 +828,7 @@ unpin_out:
|
|||
*/
|
||||
int i915_switch_context(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct drm_i915_private *dev_priv = engine->dev->dev_private;
|
||||
|
||||
WARN_ON(i915.enable_execlists);
|
||||
|
|
|
@ -942,7 +942,7 @@ static int
|
|||
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
|
||||
struct list_head *vmas)
|
||||
{
|
||||
const unsigned other_rings = ~intel_ring_flag(req->ring);
|
||||
const unsigned other_rings = ~intel_ring_flag(req->engine);
|
||||
struct i915_vma *vma;
|
||||
uint32_t flush_domains = 0;
|
||||
bool flush_chipset = false;
|
||||
|
@ -952,7 +952,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
|
|||
struct drm_i915_gem_object *obj = vma->obj;
|
||||
|
||||
if (obj->active & other_rings) {
|
||||
ret = i915_gem_object_sync(obj, req->ring, &req);
|
||||
ret = i915_gem_object_sync(obj, req->engine, &req);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
@ -964,7 +964,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
|
|||
}
|
||||
|
||||
if (flush_chipset)
|
||||
i915_gem_chipset_flush(req->ring->dev);
|
||||
i915_gem_chipset_flush(req->engine->dev);
|
||||
|
||||
if (flush_domains & I915_GEM_DOMAIN_GTT)
|
||||
wmb();
|
||||
|
@ -1140,7 +1140,7 @@ void
|
|||
i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
|
||||
{
|
||||
/* Unconditionally force add_request to emit a full flush. */
|
||||
params->ring->gpu_caches_dirty = true;
|
||||
params->engine->gpu_caches_dirty = true;
|
||||
|
||||
/* Add a breadcrumb for the completion of the batch buffer */
|
||||
__i915_add_request(params->request, params->batch_obj, true);
|
||||
|
@ -1150,11 +1150,11 @@ static int
|
|||
i915_reset_gen7_sol_offsets(struct drm_device *dev,
|
||||
struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int ret, i;
|
||||
|
||||
if (!IS_GEN7(dev) || engine != &dev_priv->ring[RCS]) {
|
||||
if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
|
||||
DRM_DEBUG("sol reset is gen7/rcs only\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -1233,7 +1233,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
|
|||
struct list_head *vmas)
|
||||
{
|
||||
struct drm_device *dev = params->dev;
|
||||
struct intel_engine_cs *engine = params->ring;
|
||||
struct intel_engine_cs *engine = params->engine;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u64 exec_start, exec_len;
|
||||
int instp_mode;
|
||||
|
@ -1257,7 +1257,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
|
|||
case I915_EXEC_CONSTANTS_REL_GENERAL:
|
||||
case I915_EXEC_CONSTANTS_ABSOLUTE:
|
||||
case I915_EXEC_CONSTANTS_REL_SURFACE:
|
||||
if (instp_mode != 0 && engine != &dev_priv->ring[RCS]) {
|
||||
if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
|
||||
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -1284,7 +1284,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (engine == &dev_priv->ring[RCS] &&
|
||||
if (engine == &dev_priv->engine[RCS] &&
|
||||
instp_mode != dev_priv->relative_constants_mode) {
|
||||
ret = intel_ring_begin(params->request, 4);
|
||||
if (ret)
|
||||
|
@ -1412,9 +1412,9 @@ eb_select_ring(struct drm_i915_private *dev_priv,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
*ring = &dev_priv->ring[_VCS(bsd_idx)];
|
||||
*ring = &dev_priv->engine[_VCS(bsd_idx)];
|
||||
} else {
|
||||
*ring = &dev_priv->ring[user_ring_map[user_ring_id]];
|
||||
*ring = &dev_priv->engine[user_ring_map[user_ring_id]];
|
||||
}
|
||||
|
||||
if (!intel_ring_initialized(*ring)) {
|
||||
|
@ -1632,7 +1632,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
|||
*/
|
||||
params->dev = dev;
|
||||
params->file = file;
|
||||
params->ring = engine;
|
||||
params->engine = engine;
|
||||
params->dispatch_flags = dispatch_flags;
|
||||
params->batch_obj = batch_obj;
|
||||
params->ctx = ctx;
|
||||
|
|
|
@ -658,7 +658,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
|
|||
unsigned entry,
|
||||
dma_addr_t addr)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
int ret;
|
||||
|
||||
BUG_ON(entry >= 4);
|
||||
|
@ -1650,7 +1650,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
|
|||
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
|
||||
struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
int ret;
|
||||
|
||||
/* NB: TLBs must be flushed and invalidated before a switch */
|
||||
|
@ -1676,7 +1676,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
|
|||
static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
|
||||
struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
|
||||
|
||||
I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
|
||||
|
@ -1687,7 +1687,7 @@ static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
|
|||
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
|
||||
struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
int ret;
|
||||
|
||||
/* NB: TLBs must be flushed and invalidated before a switch */
|
||||
|
@ -1720,7 +1720,7 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
|
|||
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
|
||||
struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct drm_device *dev = ppgtt->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
|
@ -2192,7 +2192,7 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
|
|||
|
||||
int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = req->engine->dev->dev_private;
|
||||
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
|
||||
|
||||
if (i915.enable_execlists)
|
||||
|
@ -2309,7 +2309,7 @@ void i915_check_and_clear_faults(struct drm_device *dev)
|
|||
fault_reg & ~RING_FAULT_VALID);
|
||||
}
|
||||
}
|
||||
POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
|
||||
POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS]));
|
||||
}
|
||||
|
||||
static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
|
||||
|
|
|
@ -198,21 +198,21 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
|
|||
struct render_state so;
|
||||
int ret;
|
||||
|
||||
ret = i915_gem_render_state_prepare(req->ring, &so);
|
||||
ret = i915_gem_render_state_prepare(req->engine, &so);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (so.rodata == NULL)
|
||||
return 0;
|
||||
|
||||
ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
|
||||
ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
|
||||
so.rodata->batch_items * 4,
|
||||
I915_DISPATCH_SECURE);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (so.aux_batch_size > 8) {
|
||||
ret = req->ring->dispatch_execbuffer(req,
|
||||
ret = req->engine->dispatch_execbuffer(req,
|
||||
(so.ggtt_offset +
|
||||
so.aux_batch_offset),
|
||||
so.aux_batch_size,
|
||||
|
|
|
@ -431,7 +431,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
|
|||
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
|
||||
obj = error->ring[i].batchbuffer;
|
||||
if (obj) {
|
||||
err_puts(m, dev_priv->ring[i].name);
|
||||
err_puts(m, dev_priv->engine[i].name);
|
||||
if (error->ring[i].pid != -1)
|
||||
err_printf(m, " (submitted by %s [%d])",
|
||||
error->ring[i].comm,
|
||||
|
@ -445,14 +445,14 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
|
|||
obj = error->ring[i].wa_batchbuffer;
|
||||
if (obj) {
|
||||
err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
|
||||
dev_priv->ring[i].name,
|
||||
dev_priv->engine[i].name,
|
||||
lower_32_bits(obj->gtt_offset));
|
||||
print_error_obj(m, obj);
|
||||
}
|
||||
|
||||
if (error->ring[i].num_requests) {
|
||||
err_printf(m, "%s --- %d requests\n",
|
||||
dev_priv->ring[i].name,
|
||||
dev_priv->engine[i].name,
|
||||
error->ring[i].num_requests);
|
||||
for (j = 0; j < error->ring[i].num_requests; j++) {
|
||||
err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
|
||||
|
@ -464,7 +464,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
|
|||
|
||||
if ((obj = error->ring[i].ringbuffer)) {
|
||||
err_printf(m, "%s --- ringbuffer = 0x%08x\n",
|
||||
dev_priv->ring[i].name,
|
||||
dev_priv->engine[i].name,
|
||||
lower_32_bits(obj->gtt_offset));
|
||||
print_error_obj(m, obj);
|
||||
}
|
||||
|
@ -478,7 +478,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
|
|||
hws_page = &obj->pages[LRC_PPHWSP_PN][0];
|
||||
}
|
||||
err_printf(m, "%s --- HW Status = 0x%08llx\n",
|
||||
dev_priv->ring[i].name, hws_offset);
|
||||
dev_priv->engine[i].name, hws_offset);
|
||||
offset = 0;
|
||||
for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
|
||||
err_printf(m, "[%04x] %08x %08x %08x %08x\n",
|
||||
|
@ -495,12 +495,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
|
|||
if (obj) {
|
||||
u64 wa_ctx_offset = obj->gtt_offset;
|
||||
u32 *wa_ctx_page = &obj->pages[0][0];
|
||||
struct intel_engine_cs *engine = &dev_priv->ring[RCS];
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
|
||||
u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
|
||||
engine->wa_ctx.per_ctx.size);
|
||||
|
||||
err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
|
||||
dev_priv->ring[i].name, wa_ctx_offset);
|
||||
dev_priv->engine[i].name, wa_ctx_offset);
|
||||
offset = 0;
|
||||
for (elt = 0; elt < wa_ctx_size; elt += 4) {
|
||||
err_printf(m, "[%04x] %08x %08x %08x %08x\n",
|
||||
|
@ -515,7 +515,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
|
|||
|
||||
if ((obj = error->ring[i].ctx)) {
|
||||
err_printf(m, "%s --- HW Context = 0x%08x\n",
|
||||
dev_priv->ring[i].name,
|
||||
dev_priv->engine[i].name,
|
||||
lower_32_bits(obj->gtt_offset));
|
||||
print_error_obj(m, obj);
|
||||
}
|
||||
|
@ -1020,7 +1020,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
|
|||
int i, count;
|
||||
|
||||
for (i = 0; i < I915_NUM_RINGS; i++) {
|
||||
struct intel_engine_cs *engine = &dev_priv->ring[i];
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[i];
|
||||
struct intel_ringbuffer *rbuf;
|
||||
|
||||
error->ring[i].pid = -1;
|
||||
|
|
|
@ -542,11 +542,12 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
|
|||
wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
|
||||
wqi->header = WQ_TYPE_INORDER |
|
||||
(wq_len << WQ_LEN_SHIFT) |
|
||||
(rq->ring->guc_id << WQ_TARGET_SHIFT) |
|
||||
(rq->engine->guc_id << WQ_TARGET_SHIFT) |
|
||||
WQ_NO_WCFLUSH_WAIT;
|
||||
|
||||
/* The GuC wants only the low-order word of the context descriptor */
|
||||
wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->ring);
|
||||
wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx,
|
||||
rq->engine);
|
||||
|
||||
/* The GuC firmware wants the tail index in QWords, not bytes */
|
||||
tail = rq->ringbuf->tail >> 3;
|
||||
|
@ -569,7 +570,7 @@ int i915_guc_submit(struct i915_guc_client *client,
|
|||
struct drm_i915_gem_request *rq)
|
||||
{
|
||||
struct intel_guc *guc = client->guc;
|
||||
unsigned int engine_id = rq->ring->guc_id;
|
||||
unsigned int engine_id = rq->engine->guc_id;
|
||||
int q_ret, b_ret;
|
||||
|
||||
q_ret = guc_add_workqueue_item(client, rq);
|
||||
|
@ -867,7 +868,7 @@ static void guc_create_ads(struct intel_guc *guc)
|
|||
* so its address won't change after we've told the GuC where
|
||||
* to find it.
|
||||
*/
|
||||
engine = &dev_priv->ring[RCS];
|
||||
engine = &dev_priv->engine[RCS];
|
||||
ads->golden_context_lrca = engine->status_page.gfx_addr;
|
||||
|
||||
for_each_ring(engine, dev_priv, i)
|
||||
|
|
|
@ -1291,9 +1291,9 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
|
|||
{
|
||||
if (gt_iir &
|
||||
(GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
|
||||
notify_ring(&dev_priv->ring[RCS]);
|
||||
notify_ring(&dev_priv->engine[RCS]);
|
||||
if (gt_iir & ILK_BSD_USER_INTERRUPT)
|
||||
notify_ring(&dev_priv->ring[VCS]);
|
||||
notify_ring(&dev_priv->engine[VCS]);
|
||||
}
|
||||
|
||||
static void snb_gt_irq_handler(struct drm_device *dev,
|
||||
|
@ -1303,11 +1303,11 @@ static void snb_gt_irq_handler(struct drm_device *dev,
|
|||
|
||||
if (gt_iir &
|
||||
(GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
|
||||
notify_ring(&dev_priv->ring[RCS]);
|
||||
notify_ring(&dev_priv->engine[RCS]);
|
||||
if (gt_iir & GT_BSD_USER_INTERRUPT)
|
||||
notify_ring(&dev_priv->ring[VCS]);
|
||||
notify_ring(&dev_priv->engine[VCS]);
|
||||
if (gt_iir & GT_BLT_USER_INTERRUPT)
|
||||
notify_ring(&dev_priv->ring[BCS]);
|
||||
notify_ring(&dev_priv->engine[BCS]);
|
||||
|
||||
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
|
||||
GT_BSD_CS_ERROR_INTERRUPT |
|
||||
|
@ -1338,11 +1338,11 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
|
|||
I915_WRITE_FW(GEN8_GT_IIR(0), iir);
|
||||
ret = IRQ_HANDLED;
|
||||
|
||||
gen8_cs_irq_handler(&dev_priv->ring[RCS],
|
||||
iir, GEN8_RCS_IRQ_SHIFT);
|
||||
gen8_cs_irq_handler(&dev_priv->engine[RCS],
|
||||
iir, GEN8_RCS_IRQ_SHIFT);
|
||||
|
||||
gen8_cs_irq_handler(&dev_priv->ring[BCS],
|
||||
iir, GEN8_BCS_IRQ_SHIFT);
|
||||
gen8_cs_irq_handler(&dev_priv->engine[BCS],
|
||||
iir, GEN8_BCS_IRQ_SHIFT);
|
||||
} else
|
||||
DRM_ERROR("The master control interrupt lied (GT0)!\n");
|
||||
}
|
||||
|
@ -1353,11 +1353,11 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
|
|||
I915_WRITE_FW(GEN8_GT_IIR(1), iir);
|
||||
ret = IRQ_HANDLED;
|
||||
|
||||
gen8_cs_irq_handler(&dev_priv->ring[VCS],
|
||||
iir, GEN8_VCS1_IRQ_SHIFT);
|
||||
gen8_cs_irq_handler(&dev_priv->engine[VCS],
|
||||
iir, GEN8_VCS1_IRQ_SHIFT);
|
||||
|
||||
gen8_cs_irq_handler(&dev_priv->ring[VCS2],
|
||||
iir, GEN8_VCS2_IRQ_SHIFT);
|
||||
gen8_cs_irq_handler(&dev_priv->engine[VCS2],
|
||||
iir, GEN8_VCS2_IRQ_SHIFT);
|
||||
} else
|
||||
DRM_ERROR("The master control interrupt lied (GT1)!\n");
|
||||
}
|
||||
|
@ -1368,8 +1368,8 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
|
|||
I915_WRITE_FW(GEN8_GT_IIR(3), iir);
|
||||
ret = IRQ_HANDLED;
|
||||
|
||||
gen8_cs_irq_handler(&dev_priv->ring[VECS],
|
||||
iir, GEN8_VECS_IRQ_SHIFT);
|
||||
gen8_cs_irq_handler(&dev_priv->engine[VECS],
|
||||
iir, GEN8_VECS_IRQ_SHIFT);
|
||||
} else
|
||||
DRM_ERROR("The master control interrupt lied (GT3)!\n");
|
||||
}
|
||||
|
@ -1629,7 +1629,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
|
|||
|
||||
if (HAS_VEBOX(dev_priv->dev)) {
|
||||
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
|
||||
notify_ring(&dev_priv->ring[VECS]);
|
||||
notify_ring(&dev_priv->engine[VECS]);
|
||||
|
||||
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
|
||||
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
|
||||
|
@ -4042,7 +4042,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
|
|||
new_iir = I915_READ16(IIR); /* Flush posted writes */
|
||||
|
||||
if (iir & I915_USER_INTERRUPT)
|
||||
notify_ring(&dev_priv->ring[RCS]);
|
||||
notify_ring(&dev_priv->engine[RCS]);
|
||||
|
||||
for_each_pipe(dev_priv, pipe) {
|
||||
int plane = pipe;
|
||||
|
@ -4238,7 +4238,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
|
|||
new_iir = I915_READ(IIR); /* Flush posted writes */
|
||||
|
||||
if (iir & I915_USER_INTERRUPT)
|
||||
notify_ring(&dev_priv->ring[RCS]);
|
||||
notify_ring(&dev_priv->engine[RCS]);
|
||||
|
||||
for_each_pipe(dev_priv, pipe) {
|
||||
int plane = pipe;
|
||||
|
@ -4468,9 +4468,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
|
|||
new_iir = I915_READ(IIR); /* Flush posted writes */
|
||||
|
||||
if (iir & I915_USER_INTERRUPT)
|
||||
notify_ring(&dev_priv->ring[RCS]);
|
||||
notify_ring(&dev_priv->engine[RCS]);
|
||||
if (iir & I915_BSD_USER_INTERRUPT)
|
||||
notify_ring(&dev_priv->ring[VCS]);
|
||||
notify_ring(&dev_priv->engine[VCS]);
|
||||
|
||||
for_each_pipe(dev_priv, pipe) {
|
||||
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
|
||||
|
|
|
@ -464,7 +464,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
|
|||
TP_fast_assign(
|
||||
__entry->dev = from->dev->primary->index;
|
||||
__entry->sync_from = from->id;
|
||||
__entry->sync_to = to_req->ring->id;
|
||||
__entry->sync_to = to_req->engine->id;
|
||||
__entry->seqno = i915_gem_request_get_seqno(req);
|
||||
),
|
||||
|
||||
|
@ -486,13 +486,13 @@ TRACE_EVENT(i915_gem_ring_dispatch,
|
|||
),
|
||||
|
||||
TP_fast_assign(
|
||||
struct intel_engine_cs *ring =
|
||||
struct intel_engine_cs *engine =
|
||||
i915_gem_request_get_ring(req);
|
||||
__entry->dev = ring->dev->primary->index;
|
||||
__entry->ring = ring->id;
|
||||
__entry->dev = engine->dev->primary->index;
|
||||
__entry->ring = engine->id;
|
||||
__entry->seqno = i915_gem_request_get_seqno(req);
|
||||
__entry->flags = flags;
|
||||
i915_trace_irq_get(ring, req);
|
||||
i915_trace_irq_get(engine, req);
|
||||
),
|
||||
|
||||
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
|
||||
|
@ -511,8 +511,8 @@ TRACE_EVENT(i915_gem_ring_flush,
|
|||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = req->ring->dev->primary->index;
|
||||
__entry->ring = req->ring->id;
|
||||
__entry->dev = req->engine->dev->primary->index;
|
||||
__entry->ring = req->engine->id;
|
||||
__entry->invalidate = invalidate;
|
||||
__entry->flush = flush;
|
||||
),
|
||||
|
@ -533,10 +533,10 @@ DECLARE_EVENT_CLASS(i915_gem_request,
|
|||
),
|
||||
|
||||
TP_fast_assign(
|
||||
struct intel_engine_cs *ring =
|
||||
struct intel_engine_cs *engine =
|
||||
i915_gem_request_get_ring(req);
|
||||
__entry->dev = ring->dev->primary->index;
|
||||
__entry->ring = ring->id;
|
||||
__entry->dev = engine->dev->primary->index;
|
||||
__entry->ring = engine->id;
|
||||
__entry->seqno = i915_gem_request_get_seqno(req);
|
||||
),
|
||||
|
||||
|
@ -550,8 +550,8 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
|
|||
);
|
||||
|
||||
TRACE_EVENT(i915_gem_request_notify,
|
||||
TP_PROTO(struct intel_engine_cs *ring),
|
||||
TP_ARGS(ring),
|
||||
TP_PROTO(struct intel_engine_cs *engine),
|
||||
TP_ARGS(engine),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, dev)
|
||||
|
@ -560,9 +560,9 @@ TRACE_EVENT(i915_gem_request_notify,
|
|||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = ring->dev->primary->index;
|
||||
__entry->ring = ring->id;
|
||||
__entry->seqno = ring->get_seqno(ring, false);
|
||||
__entry->dev = engine->dev->primary->index;
|
||||
__entry->ring = engine->id;
|
||||
__entry->seqno = engine->get_seqno(engine, false);
|
||||
),
|
||||
|
||||
TP_printk("dev=%u, ring=%u, seqno=%u",
|
||||
|
@ -597,13 +597,13 @@ TRACE_EVENT(i915_gem_request_wait_begin,
|
|||
* less desirable.
|
||||
*/
|
||||
TP_fast_assign(
|
||||
struct intel_engine_cs *ring =
|
||||
struct intel_engine_cs *engine =
|
||||
i915_gem_request_get_ring(req);
|
||||
__entry->dev = ring->dev->primary->index;
|
||||
__entry->ring = ring->id;
|
||||
__entry->dev = engine->dev->primary->index;
|
||||
__entry->ring = engine->id;
|
||||
__entry->seqno = i915_gem_request_get_seqno(req);
|
||||
__entry->blocking =
|
||||
mutex_is_locked(&ring->dev->struct_mutex);
|
||||
mutex_is_locked(&engine->dev->struct_mutex);
|
||||
),
|
||||
|
||||
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
|
||||
|
@ -777,9 +777,9 @@ DEFINE_EVENT(i915_context, i915_context_free,
|
|||
* called only if full ppgtt is enabled.
|
||||
*/
|
||||
TRACE_EVENT(switch_mm,
|
||||
TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),
|
||||
TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to),
|
||||
|
||||
TP_ARGS(ring, to),
|
||||
TP_ARGS(engine, to),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, ring)
|
||||
|
@ -789,10 +789,10 @@ TRACE_EVENT(switch_mm,
|
|||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->ring = ring->id;
|
||||
__entry->ring = engine->id;
|
||||
__entry->to = to;
|
||||
__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
|
||||
__entry->dev = ring->dev->primary->index;
|
||||
__entry->dev = engine->dev->primary->index;
|
||||
),
|
||||
|
||||
TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
|
||||
|
|
|
@ -10984,7 +10984,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
|
|||
struct drm_i915_gem_request *req,
|
||||
uint32_t flags)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
u32 flip_mask;
|
||||
int ret;
|
||||
|
@ -11019,7 +11019,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
|
|||
struct drm_i915_gem_request *req,
|
||||
uint32_t flags)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
u32 flip_mask;
|
||||
int ret;
|
||||
|
@ -11051,7 +11051,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
|
|||
struct drm_i915_gem_request *req,
|
||||
uint32_t flags)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
uint32_t pf, pipesrc;
|
||||
|
@ -11090,7 +11090,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
|
|||
struct drm_i915_gem_request *req,
|
||||
uint32_t flags)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
uint32_t pf, pipesrc;
|
||||
|
@ -11126,7 +11126,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
|
|||
struct drm_i915_gem_request *req,
|
||||
uint32_t flags)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
uint32_t plane_bit = 0;
|
||||
int len, ret;
|
||||
|
@ -11575,18 +11575,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
|
|||
work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
|
||||
|
||||
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
|
||||
engine = &dev_priv->ring[BCS];
|
||||
engine = &dev_priv->engine[BCS];
|
||||
if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
|
||||
/* vlv: DISPLAY_FLIP fails to change tiling */
|
||||
engine = NULL;
|
||||
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
|
||||
engine = &dev_priv->ring[BCS];
|
||||
engine = &dev_priv->engine[BCS];
|
||||
} else if (INTEL_INFO(dev)->gen >= 7) {
|
||||
engine = i915_gem_request_get_ring(obj->last_write_req);
|
||||
if (engine == NULL || engine->id != RCS)
|
||||
engine = &dev_priv->ring[BCS];
|
||||
engine = &dev_priv->engine[BCS];
|
||||
} else {
|
||||
engine = &dev_priv->ring[RCS];
|
||||
engine = &dev_priv->engine[RCS];
|
||||
}
|
||||
|
||||
mmio_flip = use_mmio_flip(engine, obj);
|
||||
|
|
|
@ -360,19 +360,19 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
|
|||
struct drm_i915_gem_request *rq1)
|
||||
{
|
||||
|
||||
struct intel_engine_cs *engine = rq0->ring;
|
||||
struct intel_engine_cs *engine = rq0->engine;
|
||||
struct drm_device *dev = engine->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
uint64_t desc[2];
|
||||
|
||||
if (rq1) {
|
||||
desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring);
|
||||
desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
|
||||
rq1->elsp_submitted++;
|
||||
} else {
|
||||
desc[1] = 0;
|
||||
}
|
||||
|
||||
desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->ring);
|
||||
desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
|
||||
rq0->elsp_submitted++;
|
||||
|
||||
/* You must always write both descriptors in the order below. */
|
||||
|
@ -398,7 +398,7 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
|
|||
|
||||
static void execlists_update_context(struct drm_i915_gem_request *rq)
|
||||
{
|
||||
struct intel_engine_cs *engine = rq->ring;
|
||||
struct intel_engine_cs *engine = rq->engine;
|
||||
struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
|
||||
uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
|
||||
|
||||
|
@ -611,7 +611,7 @@ void intel_lrc_irq_handler(struct intel_engine_cs *engine)
|
|||
|
||||
static void execlists_context_queue(struct drm_i915_gem_request *request)
|
||||
{
|
||||
struct intel_engine_cs *engine = request->ring;
|
||||
struct intel_engine_cs *engine = request->engine;
|
||||
struct drm_i915_gem_request *cursor;
|
||||
int num_elements = 0;
|
||||
|
||||
|
@ -650,7 +650,7 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
|
|||
|
||||
static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
uint32_t flush_domains;
|
||||
int ret;
|
||||
|
||||
|
@ -669,7 +669,7 @@ static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
|
|||
static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
|
||||
struct list_head *vmas)
|
||||
{
|
||||
const unsigned other_rings = ~intel_ring_flag(req->ring);
|
||||
const unsigned other_rings = ~intel_ring_flag(req->engine);
|
||||
struct i915_vma *vma;
|
||||
uint32_t flush_domains = 0;
|
||||
bool flush_chipset = false;
|
||||
|
@ -679,7 +679,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
|
|||
struct drm_i915_gem_object *obj = vma->obj;
|
||||
|
||||
if (obj->active & other_rings) {
|
||||
ret = i915_gem_object_sync(obj, req->ring, &req);
|
||||
ret = i915_gem_object_sync(obj, req->engine, &req);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
@ -703,7 +703,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
|
|||
{
|
||||
int ret = 0;
|
||||
|
||||
request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
|
||||
request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
|
||||
|
||||
if (i915.enable_guc_submission) {
|
||||
/*
|
||||
|
@ -719,7 +719,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
|
|||
}
|
||||
|
||||
if (request->ctx != request->i915->kernel_context)
|
||||
ret = intel_lr_context_pin(request->ctx, request->ring);
|
||||
ret = intel_lr_context_pin(request->ctx, request->engine);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -728,7 +728,7 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
|
|||
int bytes)
|
||||
{
|
||||
struct intel_ringbuffer *ringbuf = req->ringbuf;
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct drm_i915_gem_request *target;
|
||||
unsigned space;
|
||||
int ret;
|
||||
|
@ -780,7 +780,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
|
|||
{
|
||||
struct intel_ringbuffer *ringbuf = request->ringbuf;
|
||||
struct drm_i915_private *dev_priv = request->i915;
|
||||
struct intel_engine_cs *engine = request->ring;
|
||||
struct intel_engine_cs *engine = request->engine;
|
||||
|
||||
intel_logical_ring_advance(ringbuf);
|
||||
request->tail = ringbuf->tail;
|
||||
|
@ -897,7 +897,7 @@ int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
|
|||
int ret;
|
||||
|
||||
WARN_ON(req == NULL);
|
||||
dev_priv = req->ring->dev->dev_private;
|
||||
dev_priv = req->engine->dev->dev_private;
|
||||
|
||||
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
|
||||
dev_priv->mm.interruptible);
|
||||
|
@ -949,7 +949,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
|
|||
struct list_head *vmas)
|
||||
{
|
||||
struct drm_device *dev = params->dev;
|
||||
struct intel_engine_cs *engine = params->ring;
|
||||
struct intel_engine_cs *engine = params->engine;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
|
||||
u64 exec_start;
|
||||
|
@ -963,7 +963,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
|
|||
case I915_EXEC_CONSTANTS_REL_GENERAL:
|
||||
case I915_EXEC_CONSTANTS_ABSOLUTE:
|
||||
case I915_EXEC_CONSTANTS_REL_SURFACE:
|
||||
if (instp_mode != 0 && engine != &dev_priv->ring[RCS]) {
|
||||
if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
|
||||
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -992,7 +992,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (engine == &dev_priv->ring[RCS] &&
|
||||
if (engine == &dev_priv->engine[RCS] &&
|
||||
instp_mode != dev_priv->relative_constants_mode) {
|
||||
ret = intel_logical_ring_begin(params->request, 4);
|
||||
if (ret)
|
||||
|
@ -1073,7 +1073,7 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
|
|||
|
||||
int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
int ret;
|
||||
|
||||
if (!engine->gpu_caches_dirty)
|
||||
|
@ -1174,7 +1174,7 @@ void intel_lr_context_unpin(struct intel_context *ctx,
|
|||
static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
|
||||
{
|
||||
int ret, i;
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct intel_ringbuffer *ringbuf = req->ringbuf;
|
||||
struct drm_device *dev = engine->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
@ -1647,7 +1647,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
|
|||
static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct intel_ringbuffer *ringbuf = req->ringbuf;
|
||||
const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
|
||||
int i, ret;
|
||||
|
@ -1688,7 +1688,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
|
|||
* not idle). PML4 is allocated during ppgtt init so this is
|
||||
* not needed in 48-bit.*/
|
||||
if (req->ctx->ppgtt &&
|
||||
(intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
|
||||
(intel_ring_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
|
||||
if (!USES_FULL_48BIT_PPGTT(req->i915) &&
|
||||
!intel_vgpu_active(req->i915->dev)) {
|
||||
ret = intel_logical_ring_emit_pdps(req);
|
||||
|
@ -1696,7 +1696,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
|
|||
return ret;
|
||||
}
|
||||
|
||||
req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
|
||||
req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->engine);
|
||||
}
|
||||
|
||||
ret = intel_logical_ring_begin(req, 4);
|
||||
|
@ -1755,7 +1755,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
|
|||
u32 unused)
|
||||
{
|
||||
struct intel_ringbuffer *ringbuf = request->ringbuf;
|
||||
struct intel_engine_cs *engine = ringbuf->ring;
|
||||
struct intel_engine_cs *engine = ringbuf->engine;
|
||||
struct drm_device *dev = engine->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
uint32_t cmd;
|
||||
|
@ -1776,7 +1776,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
|
|||
|
||||
if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
|
||||
cmd |= MI_INVALIDATE_TLB;
|
||||
if (engine == &dev_priv->ring[VCS])
|
||||
if (engine == &dev_priv->engine[VCS])
|
||||
cmd |= MI_INVALIDATE_BSD;
|
||||
}
|
||||
|
||||
|
@ -1796,7 +1796,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
|
|||
u32 flush_domains)
|
||||
{
|
||||
struct intel_ringbuffer *ringbuf = request->ringbuf;
|
||||
struct intel_engine_cs *engine = ringbuf->ring;
|
||||
struct intel_engine_cs *engine = ringbuf->engine;
|
||||
u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
|
||||
bool vf_flush_wa = false;
|
||||
u32 flags = 0;
|
||||
|
@ -1919,7 +1919,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
|
|||
intel_logical_ring_emit(ringbuf,
|
||||
(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
|
||||
intel_logical_ring_emit(ringbuf,
|
||||
hws_seqno_address(request->ring) |
|
||||
hws_seqno_address(request->engine) |
|
||||
MI_FLUSH_DW_USE_GTT);
|
||||
intel_logical_ring_emit(ringbuf, 0);
|
||||
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
|
||||
|
@ -1946,7 +1946,7 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
|
|||
(PIPE_CONTROL_GLOBAL_GTT_IVB |
|
||||
PIPE_CONTROL_CS_STALL |
|
||||
PIPE_CONTROL_QW_WRITE));
|
||||
intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
|
||||
intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine));
|
||||
intel_logical_ring_emit(ringbuf, 0);
|
||||
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
|
||||
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
|
||||
|
@ -1958,19 +1958,19 @@ static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
|
|||
struct render_state so;
|
||||
int ret;
|
||||
|
||||
ret = i915_gem_render_state_prepare(req->ring, &so);
|
||||
ret = i915_gem_render_state_prepare(req->engine, &so);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (so.rodata == NULL)
|
||||
return 0;
|
||||
|
||||
ret = req->ring->emit_bb_start(req, so.ggtt_offset,
|
||||
ret = req->engine->emit_bb_start(req, so.ggtt_offset,
|
||||
I915_DISPATCH_SECURE);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = req->ring->emit_bb_start(req,
|
||||
ret = req->engine->emit_bb_start(req,
|
||||
(so.ggtt_offset + so.aux_batch_offset),
|
||||
I915_DISPATCH_SECURE);
|
||||
if (ret)
|
||||
|
@ -2117,7 +2117,7 @@ error:
|
|||
static int logical_render_ring_init(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_engine_cs *engine = &dev_priv->ring[RCS];
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
|
||||
int ret;
|
||||
|
||||
engine->name = "render ring";
|
||||
|
@ -2170,7 +2170,7 @@ static int logical_render_ring_init(struct drm_device *dev)
|
|||
static int logical_bsd_ring_init(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_engine_cs *engine = &dev_priv->ring[VCS];
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[VCS];
|
||||
|
||||
engine->name = "bsd ring";
|
||||
engine->id = VCS;
|
||||
|
@ -2187,7 +2187,7 @@ static int logical_bsd_ring_init(struct drm_device *dev)
|
|||
static int logical_bsd2_ring_init(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_engine_cs *engine = &dev_priv->ring[VCS2];
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
|
||||
|
||||
engine->name = "bsd2 ring";
|
||||
engine->id = VCS2;
|
||||
|
@ -2204,7 +2204,7 @@ static int logical_bsd2_ring_init(struct drm_device *dev)
|
|||
static int logical_blt_ring_init(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_engine_cs *engine = &dev_priv->ring[BCS];
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[BCS];
|
||||
|
||||
engine->name = "blitter ring";
|
||||
engine->id = BCS;
|
||||
|
@ -2221,7 +2221,7 @@ static int logical_blt_ring_init(struct drm_device *dev)
|
|||
static int logical_vebox_ring_init(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_engine_cs *engine = &dev_priv->ring[VECS];
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[VECS];
|
||||
|
||||
engine->name = "video enhancement ring";
|
||||
engine->id = VECS;
|
||||
|
@ -2281,13 +2281,13 @@ int intel_logical_rings_init(struct drm_device *dev)
|
|||
return 0;
|
||||
|
||||
cleanup_vebox_ring:
|
||||
intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
|
||||
intel_logical_ring_cleanup(&dev_priv->engine[VECS]);
|
||||
cleanup_blt_ring:
|
||||
intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
|
||||
intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
|
||||
cleanup_bsd_ring:
|
||||
intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
|
||||
intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
|
||||
cleanup_render_ring:
|
||||
intel_logical_ring_cleanup(&dev_priv->ring[RCS]);
|
||||
intel_logical_ring_cleanup(&dev_priv->engine[RCS]);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -322,7 +322,7 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
|
|||
struct drm_i915_mocs_table t;
|
||||
int ret;
|
||||
|
||||
if (get_mocs_settings(req->ring->dev, &t)) {
|
||||
if (get_mocs_settings(req->engine->dev, &t)) {
|
||||
struct drm_i915_private *dev_priv = req->i915;
|
||||
struct intel_engine_cs *engine;
|
||||
enum intel_ring_id ring_id;
|
||||
|
|
|
@ -233,7 +233,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
|
|||
{
|
||||
struct drm_device *dev = overlay->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_engine_cs *engine = &dev_priv->ring[RCS];
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
|
||||
struct drm_i915_gem_request *req;
|
||||
int ret;
|
||||
|
||||
|
@ -267,7 +267,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
|
|||
{
|
||||
struct drm_device *dev = overlay->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_engine_cs *engine = &dev_priv->ring[RCS];
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
|
||||
struct drm_i915_gem_request *req;
|
||||
u32 flip_addr = overlay->flip_addr;
|
||||
u32 tmp;
|
||||
|
@ -336,7 +336,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
|
|||
{
|
||||
struct drm_device *dev = overlay->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_engine_cs *engine = &dev_priv->ring[RCS];
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
|
||||
struct drm_i915_gem_request *req;
|
||||
u32 flip_addr = overlay->flip_addr;
|
||||
int ret;
|
||||
|
@ -409,7 +409,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
|
|||
{
|
||||
struct drm_device *dev = overlay->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_engine_cs *engine = &dev_priv->ring[RCS];
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
|
||||
int ret;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
|
||||
|
|
|
@ -7365,7 +7365,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
|
|||
struct drm_i915_gem_request *req = boost->req;
|
||||
|
||||
if (!i915_gem_request_completed(req, true))
|
||||
gen6_rps_boost(to_i915(req->ring->dev), NULL,
|
||||
gen6_rps_boost(to_i915(req->engine->dev), NULL,
|
||||
req->emitted_jiffies);
|
||||
|
||||
i915_gem_request_unreference__unlocked(req);
|
||||
|
|
|
@@ -79,7 +79,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains,
 		       u32 flush_domains)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	u32 cmd;
 	int ret;
 

@@ -106,7 +106,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains,
 		       u32 flush_domains)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_device *dev = engine->dev;
 	u32 cmd;
 	int ret;

@@ -200,7 +200,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 

@@ -236,7 +236,7 @@ static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	u32 flags = 0;
 	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;

@@ -288,7 +288,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);

@@ -309,7 +309,7 @@ static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	u32 flags = 0;
 	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;

@@ -373,7 +373,7 @@ static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);

@@ -396,7 +396,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	flags |= PIPE_CONTROL_CS_STALL;

@@ -704,7 +704,7 @@ err:
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_workarounds *w = &dev_priv->workarounds;
@@ -1269,7 +1269,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 8
-	struct intel_engine_cs *signaller = signaller_req->ring;
+	struct intel_engine_cs *signaller = signaller_req->engine;
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *waiter;

@@ -1310,7 +1310,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 6
-	struct intel_engine_cs *signaller = signaller_req->ring;
+	struct intel_engine_cs *signaller = signaller_req->engine;
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *waiter;

@@ -1348,7 +1348,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		       unsigned int num_dwords)
 {
-	struct intel_engine_cs *signaller = signaller_req->ring;
+	struct intel_engine_cs *signaller = signaller_req->engine;
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *useless;

@@ -1393,7 +1393,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 static int
 gen6_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	if (engine->semaphore.signal)

@@ -1434,7 +1434,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_engine_cs *waiter = waiter_req->ring;
+	struct intel_engine_cs *waiter = waiter_req->engine;
 	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
 	int ret;
 

@@ -1460,7 +1460,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_engine_cs *waiter = waiter_req->ring;
+	struct intel_engine_cs *waiter = waiter_req->engine;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;

@@ -1508,7 +1508,7 @@ do { \
 static int
 pc_render_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
@@ -1706,7 +1706,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 	       u32 invalidate_domains,
 	       u32 flush_domains)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);

@@ -1722,7 +1722,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 static int
 i9xx_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);

@@ -1868,7 +1868,7 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 length,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);

@@ -1895,7 +1895,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	u32 cs_offset = engine->scratch.gtt_offset;
 	int ret;
 

@@ -1957,7 +1957,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);

@@ -2187,7 +2187,7 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
 		return ERR_PTR(-ENOMEM);
 	}
 
-	ring->ring = engine;
+	ring->engine = engine;
 	list_add(&ring->link, &engine->buffers);
 
 	ring->size = size;
@@ -2377,7 +2377,7 @@ int intel_ring_idle(struct intel_engine_cs *engine)
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
-	request->ringbuf = request->ring->buffer;
+	request->ringbuf = request->engine->buffer;
 	return 0;
 }
 

@@ -2498,7 +2498,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req,
 	int ret;
 
 	WARN_ON(req == NULL);
-	engine = req->ring;
+	engine = req->engine;
 	dev_priv = engine->dev->dev_private;
 
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error,

@@ -2517,7 +2517,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req,
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
 

@@ -2589,7 +2589,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 			       u32 invalidate, u32 flush)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	uint32_t cmd;
 	int ret;
 

@@ -2636,7 +2636,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	bool ppgtt = USES_PPGTT(engine->dev) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;

@@ -2662,7 +2662,7 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			     u64 offset, u32 len,
 			     unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);

@@ -2687,7 +2687,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -2710,7 +2710,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_device *dev = engine->dev;
 	uint32_t cmd;
 	int ret;

@@ -2756,7 +2756,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
 int intel_init_render_ring_buffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *engine = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_object *obj;
 	int ret;
 

@@ -2907,7 +2907,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *engine = &dev_priv->ring[VCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[VCS];
 
 	engine->name = "bsd ring";
 	engine->id = VCS;

@@ -2984,7 +2984,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *engine = &dev_priv->ring[VCS2];
+	struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
 
 	engine->name = "bsd2 ring";
 	engine->id = VCS2;

@@ -3015,7 +3015,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 int intel_init_blt_ring_buffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *engine = &dev_priv->ring[BCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[BCS];
 
 	engine->name = "blitter ring";
 	engine->id = BCS;

@@ -3073,7 +3073,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 int intel_init_vebox_ring_buffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *engine = &dev_priv->ring[VECS];
+	struct intel_engine_cs *engine = &dev_priv->engine[VECS];
 
 	engine->name = "video enhancement ring";
 	engine->id = VECS;

@@ -3125,7 +3125,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 int
 intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	if (!engine->gpu_caches_dirty)

@@ -3144,7 +3144,7 @@ intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
 int
 intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	uint32_t flush_domains;
 	int ret;
 
@@ -99,7 +99,7 @@ struct intel_ringbuffer {
 	void __iomem *virtual_start;
 	struct i915_vma *vma;
 
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct list_head link;
 
 	u32 head;
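
The rename is purely mechanical: every intel_engine_cs struct member or array previously spelled "ring" now reads "engine", with no behavioural change. The following is a minimal, self-contained C sketch of the net effect at a call site; it is not code from the patch, and the pared-down types here are hypothetical stand-ins for the real definitions in i915_drv.h.

#include <stdio.h>

/* Hypothetical stand-ins for the i915 types touched by this patch. */
#define I915_NUM_RINGS 5
enum intel_ring_id { RCS = 0, VCS, BCS, VECS, VCS2 };

struct intel_engine_cs {
	const char *name;
};

struct drm_i915_private {
	/* Renamed by this patch: was "ring[I915_NUM_RINGS]". */
	struct intel_engine_cs engine[I915_NUM_RINGS];
};

int main(void)
{
	struct drm_i915_private dev_priv = {
		.engine[RCS] = { .name = "render ring" },
		.engine[BCS] = { .name = "blitter ring" },
	};

	/* A call site now indexes dev_priv.engine rather than dev_priv.ring. */
	printf("%s\n", dev_priv.engine[RCS].name);
	return 0;
}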