drm/i915/gt: Include the execlists CCID of each port in the engine dump
Since we print out EXECLISTS_STATUS in the dump, also print out the CCID of each context so we can cross-check between the two.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200331094239.23145-1-chris@chris-wilson.co.uk
This commit is contained in:
parent
9171555572
commit
606727842d
|
@ -1221,6 +1221,49 @@ static void print_request(struct drm_printer *m,
|
||||||
name);
|
name);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static struct intel_timeline *get_timeline(struct i915_request *rq)
|
||||||
|
{
|
||||||
|
struct intel_timeline *tl;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Even though we are holding the engine->active.lock here, there
|
||||||
|
* is no control over the submission queue per-se and we are
|
||||||
|
* inspecting the active state at a random point in time, with an
|
||||||
|
* unknown queue. Play safe and make sure the timeline remains valid.
|
||||||
|
* (Only being used for pretty printing, one extra kref shouldn't
|
||||||
|
* cause a camel stampede!)
|
||||||
|
*/
|
||||||
|
rcu_read_lock();
|
||||||
|
tl = rcu_dereference(rq->timeline);
|
||||||
|
if (!kref_get_unless_zero(&tl->kref))
|
||||||
|
tl = NULL;
|
||||||
|
rcu_read_unlock();
|
||||||
|
|
||||||
|
return tl;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int print_ring(char *buf, int sz, struct i915_request *rq)
|
||||||
|
{
|
||||||
|
int len = 0;
|
||||||
|
|
||||||
|
if (!i915_request_signaled(rq)) {
|
||||||
|
struct intel_timeline *tl = get_timeline(rq);
|
||||||
|
|
||||||
|
len = scnprintf(buf, sz,
|
||||||
|
"ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
|
||||||
|
i915_ggtt_offset(rq->ring->vma),
|
||||||
|
tl ? tl->hwsp_offset : 0,
|
||||||
|
hwsp_seqno(rq),
|
||||||
|
DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
|
||||||
|
1000 * 1000));
|
||||||
|
|
||||||
|
if (tl)
|
||||||
|
intel_timeline_put(tl);
|
||||||
|
}
|
||||||
|
|
||||||
|
return len;
|
||||||
|
}
|
||||||
|
|
||||||
static void hexdump(struct drm_printer *m, const void *buf, size_t len)
|
static void hexdump(struct drm_printer *m, const void *buf, size_t len)
|
||||||
{
|
{
|
||||||
const size_t rowsize = 8 * sizeof(u32);
|
const size_t rowsize = 8 * sizeof(u32);
|
||||||
|
@ -1250,27 +1293,6 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct intel_timeline *get_timeline(struct i915_request *rq)
|
|
||||||
{
|
|
||||||
struct intel_timeline *tl;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Even though we are holding the engine->active.lock here, there
|
|
||||||
* is no control over the submission queue per-se and we are
|
|
||||||
* inspecting the active state at a random point in time, with an
|
|
||||||
* unknown queue. Play safe and make sure the timeline remains valid.
|
|
||||||
* (Only being used for pretty printing, one extra kref shouldn't
|
|
||||||
* cause a camel stampede!)
|
|
||||||
*/
|
|
||||||
rcu_read_lock();
|
|
||||||
tl = rcu_dereference(rq->timeline);
|
|
||||||
if (!kref_get_unless_zero(&tl->kref))
|
|
||||||
tl = NULL;
|
|
||||||
rcu_read_unlock();
|
|
||||||
|
|
||||||
return tl;
|
|
||||||
}
|
|
||||||
|
|
||||||
static const char *repr_timer(const struct timer_list *t)
|
static const char *repr_timer(const struct timer_list *t)
|
||||||
{
|
{
|
||||||
if (!READ_ONCE(t->expires))
|
if (!READ_ONCE(t->expires))
|
||||||
|
@ -1383,39 +1405,24 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
|
||||||
int len;
|
int len;
|
||||||
|
|
||||||
len = scnprintf(hdr, sizeof(hdr),
|
len = scnprintf(hdr, sizeof(hdr),
|
||||||
"\t\tActive[%d]: ",
|
"\t\tActive[%d]: ccid:%08x, ",
|
||||||
(int)(port - execlists->active));
|
(int)(port - execlists->active),
|
||||||
if (!i915_request_signaled(rq)) {
|
upper_32_bits(rq->context->lrc_desc));
|
||||||
struct intel_timeline *tl = get_timeline(rq);
|
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
|
||||||
|
|
||||||
len += scnprintf(hdr + len, sizeof(hdr) - len,
|
|
||||||
"ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
|
|
||||||
i915_ggtt_offset(rq->ring->vma),
|
|
||||||
tl ? tl->hwsp_offset : 0,
|
|
||||||
hwsp_seqno(rq),
|
|
||||||
DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
|
|
||||||
1000 * 1000));
|
|
||||||
|
|
||||||
if (tl)
|
|
||||||
intel_timeline_put(tl);
|
|
||||||
}
|
|
||||||
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
|
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
|
||||||
print_request(m, rq, hdr);
|
print_request(m, rq, hdr);
|
||||||
}
|
}
|
||||||
for (port = execlists->pending; (rq = *port); port++) {
|
for (port = execlists->pending; (rq = *port); port++) {
|
||||||
struct intel_timeline *tl = get_timeline(rq);
|
char hdr[160];
|
||||||
char hdr[80];
|
int len;
|
||||||
|
|
||||||
snprintf(hdr, sizeof(hdr),
|
len = scnprintf(hdr, sizeof(hdr),
|
||||||
"\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
|
"\t\tPending[%d]: ccid:%08x, ",
|
||||||
(int)(port - execlists->pending),
|
(int)(port - execlists->pending),
|
||||||
i915_ggtt_offset(rq->ring->vma),
|
upper_32_bits(rq->context->lrc_desc));
|
||||||
tl ? tl->hwsp_offset : 0,
|
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
|
||||||
hwsp_seqno(rq));
|
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
|
||||||
print_request(m, rq, hdr);
|
print_request(m, rq, hdr);
|
||||||
|
|
||||||
if (tl)
|
|
||||||
intel_timeline_put(tl);
|
|
||||||
}
|
}
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
execlists_active_unlock_bh(execlists);
|
execlists_active_unlock_bh(execlists);
|
||||||
|
|
Loading…
Reference in New Issue