drm/i915: introduce for_each_engine_id()
Equivalent to the existing for_each_engine() macro, this will replace the latter wherever the third argument *is* actually wanted (in most places it is not used). The third argument is renamed to emphasise that it is an engine id (of type enum intel_engine_id). All the callers of the macro that actually need the third argument are updated to use this version, and the argument (generally 'i') is renamed to 'id'. Other callers (where the third argument is unused) are untouched for now; they will be updated in the next patch.

Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>

commit c3232b1883
parent db18b6a64c
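For orientation before the diff itself: the conversion applied at each call site replaces the generic int loop index with an enum intel_engine_id and switches to the new macro. A condensed before/after sketch, lifted from the describe_obj() hunk below rather than added by the patch:

        /* before: loop index declared as a plain int */
        int i;

        for_each_engine(engine, dev_priv, i)
                seq_printf(m, "%x ",
                           i915_gem_request_get_seqno(obj->last_read_req[i]));

        /* after: loop index declared as an engine id */
        enum intel_engine_id id;

        for_each_engine_id(engine, dev_priv, id)
                seq_printf(m, "%x ",
                           i915_gem_request_get_seqno(obj->last_read_req[id]));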
@@ -132,7 +132,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
         struct intel_engine_cs *engine;
         struct i915_vma *vma;
         int pin_count = 0;
-        int i;
+        enum intel_engine_id id;
 
         seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
                    &obj->base,
@@ -143,9 +143,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                    obj->base.size / 1024,
                    obj->base.read_domains,
                    obj->base.write_domain);
-        for_each_engine(engine, dev_priv, i)
+        for_each_engine_id(engine, dev_priv, id)
                 seq_printf(m, "%x ",
                            i915_gem_request_get_seqno(obj->last_read_req[i]));
+                           i915_gem_request_get_seqno(obj->last_read_req[id]));
         seq_printf(m, "] %x %x%s%s%s",
                    i915_gem_request_get_seqno(obj->last_write_req),
                    i915_gem_request_get_seqno(obj->last_fenced_req),
@@ -1334,7 +1334,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
         u64 acthd[I915_NUM_ENGINES];
         u32 seqno[I915_NUM_ENGINES];
         u32 instdone[I915_NUM_INSTDONE_REG];
-        int i, j;
+        enum intel_engine_id id;
+        int j;
 
         if (!i915.enable_hangcheck) {
                 seq_printf(m, "Hangcheck disabled\n");
@@ -1343,9 +1344,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 
         intel_runtime_pm_get(dev_priv);
 
-        for_each_engine(engine, dev_priv, i) {
-                seqno[i] = engine->get_seqno(engine, false);
-                acthd[i] = intel_ring_get_active_head(engine);
+        for_each_engine_id(engine, dev_priv, id) {
+                seqno[id] = engine->get_seqno(engine, false);
+                acthd[id] = intel_ring_get_active_head(engine);
         }
 
         i915_get_extra_instdone(dev, instdone);
@@ -1359,13 +1360,13 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
         } else
                 seq_printf(m, "Hangcheck inactive\n");
 
-        for_each_engine(engine, dev_priv, i) {
+        for_each_engine_id(engine, dev_priv, id) {
                 seq_printf(m, "%s:\n", engine->name);
                 seq_printf(m, "\tseqno = %x [current %x]\n",
-                           engine->hangcheck.seqno, seqno[i]);
+                           engine->hangcheck.seqno, seqno[id]);
                 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
                            (long long)engine->hangcheck.acthd,
-                           (long long)acthd[i]);
+                           (long long)acthd[id]);
                 seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
                 seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
 
@@ -1947,7 +1948,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_engine_cs *engine;
         struct intel_context *ctx;
-        int ret, i;
+        enum intel_engine_id id;
+        int ret;
 
         ret = mutex_lock_interruptible(&dev->struct_mutex);
         if (ret)
@@ -1965,11 +1967,11 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
                 if (i915.enable_execlists) {
                         seq_putc(m, '\n');
-                        for_each_engine(engine, dev_priv, i) {
+                        for_each_engine_id(engine, dev_priv, id) {
                                 struct drm_i915_gem_object *ctx_obj =
-                                                ctx->engine[i].state;
+                                                ctx->engine[id].state;
                                 struct intel_ringbuffer *ringbuf =
-                                                ctx->engine[i].ringbuf;
+                                                ctx->engine[id].ringbuf;
 
                                 seq_printf(m, "%s: ", engine->name);
                                 if (ctx_obj)
@@ -3134,7 +3136,8 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_engine_cs *engine;
         int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
-        int i, j, ret;
+        enum intel_engine_id id;
+        int j, ret;
 
         if (!i915_semaphore_is_enabled(dev)) {
                 seq_puts(m, "Semaphores are disabled\n");
@@ -3153,14 +3156,14 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
                 page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
 
                 seqno = (uint64_t *)kmap_atomic(page);
-                for_each_engine(engine, dev_priv, i) {
+                for_each_engine_id(engine, dev_priv, id) {
                         uint64_t offset;
 
                         seq_printf(m, "%s\n", engine->name);
 
                         seq_puts(m, " Last signal:");
                         for (j = 0; j < num_rings; j++) {
-                                offset = i * I915_NUM_ENGINES + j;
+                                offset = id * I915_NUM_ENGINES + j;
                                 seq_printf(m, "0x%08llx (0x%02llx) ",
                                            seqno[offset], offset * 8);
                         }
@@ -3168,7 +3171,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 
                         seq_puts(m, " Last wait: ");
                         for (j = 0; j < num_rings; j++) {
-                                offset = i + (j * I915_NUM_ENGINES);
+                                offset = id + (j * I915_NUM_ENGINES);
                                 seq_printf(m, "0x%08llx (0x%02llx) ",
                                            seqno[offset], offset * 8);
                         }
@@ -3178,7 +3181,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
                 kunmap_atomic(seqno);
         } else {
                 seq_puts(m, " Last signal:");
-                for_each_engine(engine, dev_priv, i)
+                for_each_engine(engine, dev_priv, id)
                         for (j = 0; j < num_rings; j++)
                                 seq_printf(m, "0x%08x\n",
                                            I915_READ(engine->semaphore.mbox.signal[j]));
@@ -3186,7 +3189,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
         }
 
         seq_puts(m, "\nSync seqno:\n");
-        for_each_engine(engine, dev_priv, i) {
+        for_each_engine(engine, dev_priv, id) {
                 for (j = 0; j < num_rings; j++) {
                         seq_printf(m, " 0x%08x ",
                                    engine->semaphore.sync_seqno[j]);
@@ -3236,6 +3239,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
         struct drm_device *dev = node->minor->dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct i915_workarounds *workarounds = &dev_priv->workarounds;
+        enum intel_engine_id id;
 
         ret = mutex_lock_interruptible(&dev->struct_mutex);
         if (ret)
@@ -3244,9 +3248,9 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
         intel_runtime_pm_get(dev_priv);
 
         seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
-        for_each_engine(engine, dev_priv, i)
+        for_each_engine_id(engine, dev_priv, id)
                 seq_printf(m, "HW whitelist count for %s: %d\n",
-                           engine->name, workarounds->hw_whitelist_count[i]);
+                           engine->name, workarounds->hw_whitelist_count[id]);
         for (i = 0; i < workarounds->count; ++i) {
                 i915_reg_t addr;
                 u32 mask, value, read;
@@ -1995,6 +1995,15 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
         for ((i__) = 0; (i__) < I915_NUM_ENGINES; (i__)++) \
                 for_each_if ((((ring__) = &(dev_priv__)->engine[(i__)]), intel_engine_initialized((ring__))))
 
+/* Iterator with engine_id */
+#define for_each_engine_id(engine__, dev_priv__, id__) \
+        for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \
+             (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
+             (engine__)++) \
+                for_each_if (((id__) = (engine__)->id, \
+                              intel_engine_initialized(engine__)))
+
+/* Iterator over subset of engines selected by mask */
 #define for_each_engine_masked(engine__, dev_priv__, mask__) \
         for ((engine__) = &dev_priv->engine[0]; (engine__) < &dev_priv->engine[I915_NUM_ENGINES]; (engine__)++) \
                 for_each_if (intel_engine_flag((engine__)) & (mask__) && intel_engine_initialized((engine__)))
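A brief note on the new iterator: id__ is assigned from engine__->id on every pass, inside for_each_if(), so the id seen by the loop body always matches the engine being visited even when uninitialised engines are skipped, making it safe for indexing per-engine arrays. A minimal usage sketch, assembled from the i915_hangcheck_info() hunk above rather than added by the patch:

        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        u64 acthd[I915_NUM_ENGINES];
        u32 seqno[I915_NUM_ENGINES];

        /* id tracks engine->id on each iteration and indexes per-engine state */
        for_each_engine_id(engine, dev_priv, id) {
                seqno[id] = engine->get_seqno(engine, false);
                acthd[id] = intel_ring_get_active_head(engine);
        }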
@@ -846,7 +846,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
                                         struct drm_i915_error_ring *ering)
 {
         struct intel_engine_cs *to;
-        int i;
+        enum intel_engine_id id;
 
         if (!i915_semaphore_is_enabled(dev_priv->dev))
                 return;
@@ -856,7 +856,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
                 i915_error_ggtt_object_create(dev_priv,
                                               dev_priv->semaphore_obj);
 
-        for_each_engine(to, dev_priv, i) {
+        for_each_engine_id(to, dev_priv, id) {
                 int idx;
                 u16 signal_offset;
                 u32 *tmp;
@@ -864,7 +864,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
                 if (engine == to)
                         continue;
 
-                signal_offset = (GEN8_SIGNAL_OFFSET(engine, i) & (PAGE_SIZE - 1))
+                signal_offset = (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1))
                                 / 4;
                 tmp = error->semaphore_obj->pages[0];
                 idx = intel_ring_sync_index(engine, to);
@@ -381,7 +381,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
         struct intel_context *ctx = client->owner;
         struct guc_context_desc desc;
         struct sg_table *sg;
-        int i;
+        enum intel_engine_id id;
 
         memset(&desc, 0, sizeof(desc));
 
@@ -390,7 +390,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
         desc.priority = client->priority;
         desc.db_id = client->doorbell_id;
 
-        for_each_engine(engine, dev_priv, i) {
+        for_each_engine_id(engine, dev_priv, id) {
                 struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
                 struct drm_i915_gem_object *obj;
                 uint64_t ctx_desc;
@@ -402,7 +402,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
                  * for now who owns a GuC client. But for future owner of GuC
                  * client, need to make sure lrc is pinned prior to enter here.
                  */
-                obj = ctx->engine[i].state;
+                obj = ctx->engine[id].state;
                 if (!obj)
                         break;  /* XXX: continue? */
 
@@ -415,7 +415,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
                 lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
                                 (engine->guc_id << GUC_ELC_ENGINE_OFFSET);
 
-                obj = ctx->engine[i].ringbuf->obj;
+                obj = ctx->engine[id].ringbuf->obj;
 
                 lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
                 lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
@@ -3073,7 +3073,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                              gpu_error.hangcheck_work.work);
         struct drm_device *dev = dev_priv->dev;
         struct intel_engine_cs *engine;
-        int i;
+        enum intel_engine_id id;
         int busy_count = 0, rings_hung = 0;
         bool stuck[I915_NUM_ENGINES] = { 0 };
 #define BUSY 1
@@ -3097,7 +3097,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
          */
         intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
 
-        for_each_engine(engine, dev_priv, i) {
+        for_each_engine_id(engine, dev_priv, id) {
                 u64 acthd;
                 u32 seqno;
                 bool busy = true;
@@ -3157,7 +3157,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                                 break;
                         case HANGCHECK_HUNG:
                                 engine->hangcheck.score += HUNG;
-                                stuck[i] = true;
+                                stuck[id] = true;
                                 break;
                         }
                 }
@@ -3184,10 +3184,10 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                 busy_count += busy;
         }
 
-        for_each_engine(engine, dev_priv, i) {
+        for_each_engine_id(engine, dev_priv, id) {
                 if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
                         DRM_INFO("%s on %s\n",
-                                 stuck[i] ? "stuck" : "no progress",
+                                 stuck[id] ? "stuck" : "no progress",
                                  engine->name);
                         rings_hung |= intel_engine_flag(engine);
                 }
@@ -325,11 +325,11 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
         if (get_mocs_settings(req->engine->dev, &t)) {
                 struct drm_i915_private *dev_priv = req->i915;
                 struct intel_engine_cs *engine;
-                enum intel_engine_id ring_id;
+                enum intel_engine_id id;
 
                 /* Program the control registers */
-                for_each_engine(engine, dev_priv, ring_id) {
-                        ret = emit_mocs_control_table(req, &t, ring_id);
+                for_each_engine_id(engine, dev_priv, id) {
+                        ret = emit_mocs_control_table(req, &t, id);
                         if (ret)
                                 return ret;
                 }
@@ -1280,7 +1280,8 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
         struct drm_device *dev = signaller->dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_engine_cs *waiter;
-        int i, ret, num_rings;
+        enum intel_engine_id id;
+        int ret, num_rings;
 
         num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
         num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
@@ -1290,9 +1291,9 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
         if (ret)
                 return ret;
 
-        for_each_engine(waiter, dev_priv, i) {
+        for_each_engine_id(waiter, dev_priv, id) {
                 u32 seqno;
-                u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+                u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
                 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                         continue;
 
@@ -1321,7 +1322,8 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
         struct drm_device *dev = signaller->dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_engine_cs *waiter;
-        int i, ret, num_rings;
+        enum intel_engine_id id;
+        int ret, num_rings;
 
         num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
         num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
@@ -1331,9 +1333,9 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
         if (ret)
                 return ret;
 
-        for_each_engine(waiter, dev_priv, i) {
+        for_each_engine_id(waiter, dev_priv, id) {
                 u32 seqno;
-                u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+                u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
                 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                         continue;
 
@@ -1359,7 +1361,8 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
         struct drm_device *dev = signaller->dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_engine_cs *useless;
-        int i, ret, num_rings;
+        enum intel_engine_id id;
+        int ret, num_rings;
 
 #define MBOX_UPDATE_DWORDS 3
         num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
@@ -1370,8 +1373,8 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
         if (ret)
                 return ret;
 
-        for_each_engine(useless, dev_priv, i) {
-                i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[i];
+        for_each_engine_id(useless, dev_priv, id) {
+                i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
 
                 if (i915_mmio_reg_valid(mbox_reg)) {
                         u32 seqno = i915_gem_request_get_seqno(signaller_req);