drm/i915/pmu: Fix PMU enable vs execlists tasklet race
Commit 99e48bf98d
("drm/i915: Lock out execlist tasklet while peeking inside for busy-stats") added a tasklet_disable call in busy stats enabling, but we failed to understand that the PMU enable callback runs as a hard IRQ (IPI). Consequence of this is that the PMU enable callback can interrupt the execlists tasklet, and will then deadlock when it calls intel_engine_stats_enable->tasklet_disable. To fix this, I realized it is possible to move the engine stats enablement and disablement to PMU event init and destroy hooks. This allows for a much simpler implementation since those hooks run in normal context (can sleep). v2: Extract engine_event_destroy. (Chris Wilson) Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Fixes: 99e48bf98d
("drm/i915: Lock out execlist tasklet while peeking inside for busy-stats") Testcase: igt/perf_pmu/enable-race-* Cc: Chris Wilson <chris@chris-wilson.co.uk> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Cc: Jani Nikula <jani.nikula@linux.intel.com> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: intel-gfx@lists.freedesktop.org Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Link: https://patchwork.freedesktop.org/patch/msgid/20180205093448.13877-1-tvrtko.ursulin@linux.intel.com (cherry picked from commit b2f78cda26
) Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20180213095747.2424-2-tvrtko.ursulin@linux.intel.com
This commit is contained in:
parent
edb76b01ac
commit
d3f84c8b09
|
@ -285,26 +285,41 @@ static u64 count_interrupts(struct drm_i915_private *i915)
|
|||
return sum;
|
||||
}
|
||||
|
||||
static void i915_pmu_event_destroy(struct perf_event *event)
|
||||
{
|
||||
WARN_ON(event->parent);
|
||||
}
|
||||
|
||||
static int engine_event_init(struct perf_event *event)
|
||||
static void engine_event_destroy(struct perf_event *event)
|
||||
{
|
||||
struct drm_i915_private *i915 =
|
||||
container_of(event->pmu, typeof(*i915), pmu.base);
|
||||
struct intel_engine_cs *engine;
|
||||
|
||||
if (!intel_engine_lookup_user(i915, engine_event_class(event),
|
||||
engine_event_instance(event)))
|
||||
return -ENODEV;
|
||||
engine = intel_engine_lookup_user(i915,
|
||||
engine_event_class(event),
|
||||
engine_event_instance(event));
|
||||
if (WARN_ON_ONCE(!engine))
|
||||
return;
|
||||
|
||||
switch (engine_event_sample(event)) {
|
||||
if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
|
||||
intel_engine_supports_stats(engine))
|
||||
intel_disable_engine_stats(engine);
|
||||
}
|
||||
|
||||
static void i915_pmu_event_destroy(struct perf_event *event)
|
||||
{
|
||||
WARN_ON(event->parent);
|
||||
|
||||
if (is_engine_event(event))
|
||||
engine_event_destroy(event);
|
||||
}
|
||||
|
||||
static int
|
||||
engine_event_status(struct intel_engine_cs *engine,
|
||||
enum drm_i915_pmu_engine_sample sample)
|
||||
{
|
||||
switch (sample) {
|
||||
case I915_SAMPLE_BUSY:
|
||||
case I915_SAMPLE_WAIT:
|
||||
break;
|
||||
case I915_SAMPLE_SEMA:
|
||||
if (INTEL_GEN(i915) < 6)
|
||||
if (INTEL_GEN(engine->i915) < 6)
|
||||
return -ENODEV;
|
||||
break;
|
||||
default:
|
||||
|
@ -314,6 +329,30 @@ static int engine_event_init(struct perf_event *event)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int engine_event_init(struct perf_event *event)
|
||||
{
|
||||
struct drm_i915_private *i915 =
|
||||
container_of(event->pmu, typeof(*i915), pmu.base);
|
||||
struct intel_engine_cs *engine;
|
||||
u8 sample;
|
||||
int ret;
|
||||
|
||||
engine = intel_engine_lookup_user(i915, engine_event_class(event),
|
||||
engine_event_instance(event));
|
||||
if (!engine)
|
||||
return -ENODEV;
|
||||
|
||||
sample = engine_event_sample(event);
|
||||
ret = engine_event_status(engine, sample);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
|
||||
ret = intel_enable_engine_stats(engine);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int i915_pmu_event_init(struct perf_event *event)
|
||||
{
|
||||
struct drm_i915_private *i915 =
|
||||
|
@ -387,7 +426,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
|
|||
if (WARN_ON_ONCE(!engine)) {
|
||||
/* Do nothing */
|
||||
} else if (sample == I915_SAMPLE_BUSY &&
|
||||
engine->pmu.busy_stats) {
|
||||
intel_engine_supports_stats(engine)) {
|
||||
val = ktime_to_ns(intel_engine_get_busy_time(engine));
|
||||
} else {
|
||||
val = engine->pmu.sample[sample].cur;
|
||||
|
@ -442,12 +481,6 @@ again:
|
|||
local64_add(new - prev, &event->count);
|
||||
}
|
||||
|
||||
static bool engine_needs_busy_stats(struct intel_engine_cs *engine)
|
||||
{
|
||||
return intel_engine_supports_stats(engine) &&
|
||||
(engine->pmu.enable & BIT(I915_SAMPLE_BUSY));
|
||||
}
|
||||
|
||||
static void i915_pmu_enable(struct perf_event *event)
|
||||
{
|
||||
struct drm_i915_private *i915 =
|
||||
|
@ -487,21 +520,7 @@ static void i915_pmu_enable(struct perf_event *event)
|
|||
|
||||
GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
|
||||
GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
|
||||
if (engine->pmu.enable_count[sample]++ == 0) {
|
||||
/*
|
||||
* Enable engine busy stats tracking if needed or
|
||||
* alternatively cancel the scheduled disable.
|
||||
*
|
||||
* If the delayed disable was pending, cancel it and
|
||||
* in this case do not enable since it already is.
|
||||
*/
|
||||
if (engine_needs_busy_stats(engine) &&
|
||||
!engine->pmu.busy_stats) {
|
||||
engine->pmu.busy_stats = true;
|
||||
if (!cancel_delayed_work(&engine->pmu.disable_busy_stats))
|
||||
intel_enable_engine_stats(engine);
|
||||
}
|
||||
}
|
||||
engine->pmu.enable_count[sample]++;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -514,14 +533,6 @@ static void i915_pmu_enable(struct perf_event *event)
|
|||
spin_unlock_irqrestore(&i915->pmu.lock, flags);
|
||||
}
|
||||
|
||||
static void __disable_busy_stats(struct work_struct *work)
|
||||
{
|
||||
struct intel_engine_cs *engine =
|
||||
container_of(work, typeof(*engine), pmu.disable_busy_stats.work);
|
||||
|
||||
intel_disable_engine_stats(engine);
|
||||
}
|
||||
|
||||
static void i915_pmu_disable(struct perf_event *event)
|
||||
{
|
||||
struct drm_i915_private *i915 =
|
||||
|
@ -545,26 +556,8 @@ static void i915_pmu_disable(struct perf_event *event)
|
|||
* Decrement the reference count and clear the enabled
|
||||
* bitmask when the last listener on an event goes away.
|
||||
*/
|
||||
if (--engine->pmu.enable_count[sample] == 0) {
|
||||
if (--engine->pmu.enable_count[sample] == 0)
|
||||
engine->pmu.enable &= ~BIT(sample);
|
||||
if (!engine_needs_busy_stats(engine) &&
|
||||
engine->pmu.busy_stats) {
|
||||
engine->pmu.busy_stats = false;
|
||||
/*
|
||||
* We request a delayed disable to handle the
|
||||
* rapid on/off cycles on events, which can
|
||||
* happen when tools like perf stat start, in a
|
||||
* nicer way.
|
||||
*
|
||||
* In addition, this also helps with busy stats
|
||||
* accuracy with background CPU offline/online
|
||||
* migration events.
|
||||
*/
|
||||
queue_delayed_work(system_wq,
|
||||
&engine->pmu.disable_busy_stats,
|
||||
round_jiffies_up_relative(HZ));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
|
||||
|
@ -797,8 +790,6 @@ static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
|
|||
|
||||
void i915_pmu_register(struct drm_i915_private *i915)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
enum intel_engine_id id;
|
||||
int ret;
|
||||
|
||||
if (INTEL_GEN(i915) <= 2) {
|
||||
|
@ -820,10 +811,6 @@ void i915_pmu_register(struct drm_i915_private *i915)
|
|||
hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
||||
i915->pmu.timer.function = i915_sample;
|
||||
|
||||
for_each_engine(engine, i915, id)
|
||||
INIT_DELAYED_WORK(&engine->pmu.disable_busy_stats,
|
||||
__disable_busy_stats);
|
||||
|
||||
ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
@ -843,9 +830,6 @@ err:
|
|||
|
||||
void i915_pmu_unregister(struct drm_i915_private *i915)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
enum intel_engine_id id;
|
||||
|
||||
if (!i915->pmu.base.event_init)
|
||||
return;
|
||||
|
||||
|
@ -853,11 +837,6 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
|
|||
|
||||
hrtimer_cancel(&i915->pmu.timer);
|
||||
|
||||
for_each_engine(engine, i915, id) {
|
||||
GEM_BUG_ON(engine->pmu.busy_stats);
|
||||
flush_delayed_work(&engine->pmu.disable_busy_stats);
|
||||
}
|
||||
|
||||
i915_pmu_unregister_cpuhp_state(i915);
|
||||
|
||||
perf_pmu_unregister(&i915->pmu.base);
|
||||
|
|
|
@ -366,20 +366,6 @@ struct intel_engine_cs {
|
|||
*/
|
||||
#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
|
||||
struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
|
||||
/**
|
||||
* @busy_stats: Has enablement of engine stats tracking been
|
||||
* requested.
|
||||
*/
|
||||
bool busy_stats;
|
||||
/**
|
||||
* @disable_busy_stats: Work item for busy stats disabling.
|
||||
*
|
||||
* Same as with @enable_busy_stats action, with the difference
|
||||
* that we delay it in case there are rapid enable-disable
|
||||
* actions, which can happen during tool startup (like perf
|
||||
* stat).
|
||||
*/
|
||||
struct delayed_work disable_busy_stats;
|
||||
} pmu;
|
||||
|
||||
/*
|
||||
|
|
Loading…
Reference in New Issue