drm/i915/gt: Convert stats.active to plain unsigned int

As context-in/out is now always serialised, we do not have to worry
about concurrent enabling/disabling of the busy-stats and can reduce the
atomic_t active to a plain unsigned int, and the seqlock to a seqcount.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210115142331.24458-3-chris@chris-wilson.co.uk
commit f530a41d13
parent 4fb05a392a
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2021-01-15 14:23:29 +00:00
3 changed files with 34 additions and 23 deletions
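Both halves of the conversion follow the classic single-writer seqcount pattern: the serialised context-in/out path updates the statistics without atomics, while a concurrent reader retries whenever it observes a torn update. Below is a minimal kernel-style sketch of that pattern for orientation; struct busy_clock and its helpers are hypothetical names, not the driver's own.

#include <linux/ktime.h>
#include <linux/seqlock.h>

/* Hypothetical stand-in for engine->stats; not the driver's struct. */
struct busy_clock {
	seqcount_t seq;		/* lets readers detect in-flight updates */
	unsigned int active;	/* plain int: there is only one writer */
	ktime_t start;		/* when the clock last became active */
	ktime_t total;		/* accumulated busy time */
};

/* Writer: externally serialised, so no atomics are needed; bumping the
 * seqcount to an odd value makes concurrent readers retry until we finish.
 */
static void busy_clock_start(struct busy_clock *c)
{
	write_seqcount_begin(&c->seq);
	c->start = ktime_get();
	c->active++;
	write_seqcount_end(&c->seq);
}

/* Reader: loop until active/start/total are seen consistently. */
static ktime_t busy_clock_sample(struct busy_clock *c)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqcount_begin(&c->seq);
		total = c->total;
		if (READ_ONCE(c->active))
			total = ktime_add(total,
					  ktime_sub(ktime_get(), c->start));
	} while (read_seqcount_retry(&c->seq, seq));

	return total;
}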

drivers/gpu/drm/i915/gt/intel_engine_cs.c

@@ -342,7 +342,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 	engine->schedule = NULL;
 
 	ewma__engine_latency_init(&engine->latency);
-	seqlock_init(&engine->stats.lock);
+	seqcount_init(&engine->stats.lock);
 
 	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
@@ -1754,7 +1754,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
 	 * add it to the total.
 	 */
 	*now = ktime_get();
-	if (atomic_read(&engine->stats.active))
+	if (READ_ONCE(engine->stats.active))
 		total = ktime_add(total, ktime_sub(*now, engine->stats.start));
 
 	return total;
@@ -1773,9 +1773,9 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
 	ktime_t total;
 
 	do {
-		seq = read_seqbegin(&engine->stats.lock);
+		seq = read_seqcount_begin(&engine->stats.lock);
 		total = __intel_engine_get_busy_time(engine, now);
-	} while (read_seqretry(&engine->stats.lock, seq));
+	} while (read_seqcount_retry(&engine->stats.lock, seq));
 
 	return total;
 }
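As a usage illustration, a sampling consumer such as the i915 PMU can difference two snapshots from intel_engine_get_busy_time() to obtain a utilisation figure. The sketch below is hypothetical (struct busy_sample and busy_permille() are not part of this patch):

#include <linux/ktime.h>
#include <linux/math64.h>

/* Hypothetical sampler state carried between invocations. */
struct busy_sample {
	ktime_t busy;	/* previous intel_engine_get_busy_time() value */
	ktime_t when;	/* wall-clock time of that sample */
};

/* Busyness over the last sampling interval, in parts per thousand. */
static unsigned int busy_permille(struct intel_engine_cs *engine,
				  struct busy_sample *prev)
{
	ktime_t now;
	ktime_t busy = intel_engine_get_busy_time(engine, &now);
	s64 dt = ktime_to_ns(ktime_sub(now, prev->when));
	s64 db = ktime_to_ns(ktime_sub(busy, prev->busy));

	prev->busy = busy;
	prev->when = now;

	return dt > 0 ? div64_s64(1000 * db, dt) : 0;
}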

drivers/gpu/drm/i915/gt/intel_engine_stats.h

@@ -17,33 +17,44 @@ static inline void intel_engine_context_in(struct intel_engine_cs *engine)
 {
 	unsigned long flags;
 
-	if (atomic_add_unless(&engine->stats.active, 1, 0))
+	if (engine->stats.active) {
+		engine->stats.active++;
 		return;
+	}
 
-	write_seqlock_irqsave(&engine->stats.lock, flags);
-	if (!atomic_add_unless(&engine->stats.active, 1, 0)) {
-		engine->stats.start = ktime_get();
-		atomic_inc(&engine->stats.active);
-	}
-	write_sequnlock_irqrestore(&engine->stats.lock, flags);
+	/* The writer is serialised; but the pmu reader may be from hardirq */
+	local_irq_save(flags);
+	write_seqcount_begin(&engine->stats.lock);
+
+	engine->stats.start = ktime_get();
+	engine->stats.active++;
+
+	write_seqcount_end(&engine->stats.lock);
+	local_irq_restore(flags);
+
+	GEM_BUG_ON(!engine->stats.active);
 }
 
 static inline void intel_engine_context_out(struct intel_engine_cs *engine)
 {
 	unsigned long flags;
 
-	GEM_BUG_ON(!atomic_read(&engine->stats.active));
-	if (atomic_add_unless(&engine->stats.active, -1, 1))
+	GEM_BUG_ON(!engine->stats.active);
+
+	if (engine->stats.active > 1) {
+		engine->stats.active--;
 		return;
+	}
 
-	write_seqlock_irqsave(&engine->stats.lock, flags);
-	if (atomic_dec_and_test(&engine->stats.active)) {
-		engine->stats.total =
-			ktime_add(engine->stats.total,
-				  ktime_sub(ktime_get(), engine->stats.start));
-	}
-	write_sequnlock_irqrestore(&engine->stats.lock, flags);
+	local_irq_save(flags);
+	write_seqcount_begin(&engine->stats.lock);
+
+	engine->stats.active--;
+	engine->stats.total =
+		ktime_add(engine->stats.total,
+			  ktime_sub(ktime_get(), engine->stats.start));
+
+	write_seqcount_end(&engine->stats.lock);
+	local_irq_restore(flags);
 }
 
 #endif /* __INTEL_ENGINE_STATS_H__ */
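The invariant behind these helpers is that overlapping contexts are charged exactly once: only the 0 -> 1 transition of stats.active stamps stats.start, and only the 1 -> 0 transition folds the elapsed interval into stats.total; the irq-off section additionally keeps a hardirq PMU reader on the same CPU from spinning on an odd seqcount. A runnable userspace model of just that counting behaviour (a sketch; the seqcount and irq protection are deliberately omitted):

#include <stdio.h>
#include <time.h>

struct stats {
	unsigned int active;
	long long start, total;	/* nanoseconds */
};

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void context_in(struct stats *s)
{
	if (s->active++)
		return;			/* already busy: counter only */
	s->start = now_ns();		/* 0 -> 1: stamp the start */
}

static void context_out(struct stats *s)
{
	if (--s->active)
		return;			/* still busy: counter only */
	s->total += now_ns() - s->start;	/* 1 -> 0: accumulate */
}

int main(void)
{
	struct stats s = { 0 };

	context_in(&s);		/* engine becomes busy */
	context_in(&s);		/* a second context overlaps */
	context_out(&s);
	context_out(&s);	/* engine idles: interval charged once */
	printf("busy for %lld ns\n", s.total);
	return 0;
}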

drivers/gpu/drm/i915/gt/intel_engine_types.h

@@ -516,12 +516,12 @@ struct intel_engine_cs {
 		/**
 		 * @active: Number of contexts currently scheduled in.
 		 */
-		atomic_t active;
+		unsigned int active;
 
 		/**
 		 * @lock: Lock protecting the below fields.
 		 */
-		seqlock_t lock;
+		seqcount_t lock;
 
 		/**
 		 * @total: Total time this engine was busy.