drm/i915: Add intel_context tracing

Add intel_context tracing. These trace points are particularly helpful
when debugging the GuC firmware and can be enabled via the
CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS kernel config option.

Cc: John Harrison <john.c.harrison@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210721215101.139794-19-matthew.brost@intel.com
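Background note, a minimal sketch (not the actual i915 TRACE_EVENT machinery; the stand-alone declarations are simplified) of the compile-time pattern the new i915_trace.h code relies on: when CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is disabled, each trace_intel_context_*() call resolves to an empty static inline stub, so the call sites added below need no #ifdef guards. With the option enabled, the new events should appear alongside the existing i915 tracepoints in tracefs (typically under /sys/kernel/tracing/events/i915/), where they can be enabled per event or as a group.

struct intel_context;

#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
/*
 * Real tracepoint; in i915_trace.h this is generated by
 * DECLARE_EVENT_CLASS()/DEFINE_EVENT(), not declared by hand.
 */
void trace_intel_context_create(struct intel_context *ce);
#else
/* No-op stub: the call compiles away when low-level tracepoints are off. */
static inline void trace_intel_context_create(struct intel_context *ce) { }
#endif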

@@ -8,6 +8,7 @@
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "intel_context.h"
#include "intel_engine.h"
@@ -28,6 +29,7 @@ static void rcu_context_free(struct rcu_head *rcu)
{
struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);
trace_intel_context_free(ce);
kmem_cache_free(global.slab_ce, ce);
}
@@ -46,6 +48,7 @@ intel_context_create(struct intel_engine_cs *engine)
return ERR_PTR(-ENOMEM);
intel_context_init(ce, engine);
trace_intel_context_create(ce);
return ce;
}
@@ -268,6 +271,8 @@ int __intel_context_do_pin_ww(struct intel_context *ce,
GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
trace_intel_context_do_pin(ce);
err_unlock:
mutex_unlock(&ce->pin_mutex);
err_post_unpin:
@@ -323,6 +328,7 @@ void __intel_context_do_unpin(struct intel_context *ce, int sub)
*/
intel_context_get(ce);
intel_context_active_release(ce);
trace_intel_context_do_unpin(ce);
intel_context_put(ce);
}


@@ -344,6 +344,7 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
if (!enabled && !err) {
trace_intel_context_sched_enable(ce);
atomic_inc(&guc->outstanding_submission_g2h);
set_context_enabled(ce);
} else if (!enabled) {
@@ -815,6 +816,8 @@ static int register_context(struct intel_context *ce)
u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool) +
ce->guc_id * sizeof(struct guc_lrc_desc);
trace_intel_context_register(ce);
return __guc_action_register_context(guc, ce->guc_id, offset);
}
@@ -835,6 +838,8 @@ static int deregister_context(struct intel_context *ce, u32 guc_id)
{
struct intel_guc *guc = ce_to_guc(ce);
trace_intel_context_deregister(ce);
return __guc_action_deregister_context(guc, guc_id);
}
@@ -908,6 +913,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce)
* registering this context.
*/
if (context_registered) {
trace_intel_context_steal_guc_id(ce);
set_context_wait_for_deregister_to_register(ce);
intel_context_get(ce);
@@ -971,6 +977,7 @@ static void __guc_context_sched_disable(struct intel_guc *guc,
GEM_BUG_ON(guc_id == GUC_INVALID_LRC_ID);
trace_intel_context_sched_disable(ce);
intel_context_get(ce);
guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
@@ -1133,6 +1140,9 @@ static void __guc_signal_context_fence(struct intel_context *ce)
lockdep_assert_held(&ce->guc_state.lock);
if (!list_empty(&ce->guc_state.fences))
trace_intel_context_fence_release(ce);
list_for_each_entry(rq, &ce->guc_state.fences, guc_fence_link)
i915_sw_fence_complete(&rq->submit);
@@ -1538,6 +1548,8 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
if (unlikely(!ce))
return -EPROTO;
trace_intel_context_deregister_done(ce);
if (context_wait_for_deregister_to_register(ce)) {
struct intel_runtime_pm *runtime_pm =
&ce->engine->gt->i915->runtime_pm;
@@ -1589,6 +1601,8 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
return -EPROTO;
}
trace_intel_context_sched_done(ce);
if (context_pending_enable(ce)) {
clr_context_pending_enable(ce);
} else if (context_pending_disable(ce)) {


@@ -895,6 +895,91 @@ TRACE_EVENT(i915_request_out,
__entry->ctx, __entry->seqno, __entry->completed)
);
DECLARE_EVENT_CLASS(intel_context,
TP_PROTO(struct intel_context *ce),
TP_ARGS(ce),
TP_STRUCT__entry(
__field(u32, guc_id)
__field(int, pin_count)
__field(u32, sched_state)
__field(u32, guc_sched_state_no_lock)
),
TP_fast_assign(
__entry->guc_id = ce->guc_id;
__entry->pin_count = atomic_read(&ce->pin_count);
__entry->sched_state = ce->guc_state.sched_state;
__entry->guc_sched_state_no_lock =
atomic_read(&ce->guc_sched_state_no_lock);
),
TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x,0x%x",
__entry->guc_id, __entry->pin_count,
__entry->sched_state,
__entry->guc_sched_state_no_lock)
);
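/*
 * Illustrative only (the values below are invented, not captured output):
 * events defined on this class print via the TP_printk() format above, so a
 * line in the trace buffer would look roughly like
 *   intel_context_register: guc_id=2, pin_count=1 sched_state=0x0,0x1
 */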
DEFINE_EVENT(intel_context, intel_context_register,
TP_PROTO(struct intel_context *ce),
TP_ARGS(ce)
);
DEFINE_EVENT(intel_context, intel_context_deregister,
TP_PROTO(struct intel_context *ce),
TP_ARGS(ce)
);
DEFINE_EVENT(intel_context, intel_context_deregister_done,
TP_PROTO(struct intel_context *ce),
TP_ARGS(ce)
);
DEFINE_EVENT(intel_context, intel_context_sched_enable,
TP_PROTO(struct intel_context *ce),
TP_ARGS(ce)
);
DEFINE_EVENT(intel_context, intel_context_sched_disable,
TP_PROTO(struct intel_context *ce),
TP_ARGS(ce)
);
DEFINE_EVENT(intel_context, intel_context_sched_done,
TP_PROTO(struct intel_context *ce),
TP_ARGS(ce)
);
DEFINE_EVENT(intel_context, intel_context_create,
TP_PROTO(struct intel_context *ce),
TP_ARGS(ce)
);
DEFINE_EVENT(intel_context, intel_context_fence_release,
TP_PROTO(struct intel_context *ce),
TP_ARGS(ce)
);
DEFINE_EVENT(intel_context, intel_context_free,
TP_PROTO(struct intel_context *ce),
TP_ARGS(ce)
);
DEFINE_EVENT(intel_context, intel_context_steal_guc_id,
TP_PROTO(struct intel_context *ce),
TP_ARGS(ce)
);
DEFINE_EVENT(intel_context, intel_context_do_pin,
TP_PROTO(struct intel_context *ce),
TP_ARGS(ce)
);
DEFINE_EVENT(intel_context, intel_context_do_unpin,
TP_PROTO(struct intel_context *ce),
TP_ARGS(ce)
);
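/*
 * When CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is not set, the static inline
 * stubs below stand in for the events above, so callers may invoke the
 * trace_intel_context_*() helpers unconditionally.
 */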
#else
#if !defined(TRACE_HEADER_MULTI_READ)
static inline void
@@ -921,6 +1006,66 @@ static inline void
trace_i915_request_out(struct i915_request *rq)
{
}
static inline void
trace_intel_context_register(struct intel_context *ce)
{
}
static inline void
trace_intel_context_deregister(struct intel_context *ce)
{
}
static inline void
trace_intel_context_deregister_done(struct intel_context *ce)
{
}
static inline void
trace_intel_context_sched_enable(struct intel_context *ce)
{
}
static inline void
trace_intel_context_sched_disable(struct intel_context *ce)
{
}
static inline void
trace_intel_context_sched_done(struct intel_context *ce)
{
}
static inline void
trace_intel_context_create(struct intel_context *ce)
{
}
static inline void
trace_intel_context_fence_release(struct intel_context *ce)
{
}
static inline void
trace_intel_context_free(struct intel_context *ce)
{
}
static inline void
trace_intel_context_steal_guc_id(struct intel_context *ce)
{
}
static inline void
trace_intel_context_do_pin(struct intel_context *ce)
{
}
static inline void
trace_intel_context_do_unpin(struct intel_context *ce)
{
}
#endif
#endif