// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2016, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/static_key.h>
#include <linux/interrupt.h>
#include <linux/idr.h>
#include <linux/irq.h>

#include <trace/events/irq.h>

#include "internals.h"

DEFINE_STATIC_KEY_FALSE(irq_timing_enabled);

DEFINE_PER_CPU(struct irq_timings, irq_timings);

struct irqt_stat {
        u64     next_evt;
};

static DEFINE_IDR(irqt_stats);

void irq_timings_enable(void)
{
        static_branch_enable(&irq_timing_enabled);
}

void irq_timings_disable(void)
{
        static_branch_disable(&irq_timing_enabled);
}
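
/*
 * The static key above lets the interrupt hot path skip timestamp
 * recording entirely while the timings infrastructure is disabled.
 * As an illustrative sketch only (the in-tree recording helper lives
 * in internals.h and differs in detail, e.g. it also encodes the irq
 * number into the stored value), a hot-path caller would gate the
 * per-CPU recording on the key along these lines:
 *
 *      if (static_branch_likely(&irq_timing_enabled)) {
 *              struct irq_timings *t = this_cpu_ptr(&irq_timings);
 *
 *              t->values[t->count++ & IRQ_TIMINGS_MASK] = local_clock();
 *      }
 *
 * so the disabled case costs no more than a patched-out branch.
 */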
/**
 * irq_timings_next_event - Return when the next event is supposed to arrive
 * @now: the current time
 *
 * During the last busy cycle, the number of interrupts is incremented
 * and stored in the irq_timings structure. This information is
 * necessary to:
 *
 * - know if the index in the table wrapped up:
 *
 *      If more than the array size interrupts happened during the
 *      last busy/idle cycle, the index wrapped up and we have to
 *      begin with the next element in the array which is the last one
 *      in the sequence, otherwise it is at index 0.
 *
 * - have an indication of the interrupts activity on this CPU
 *   (eg. irq/sec)
 *
 * The values are 'consumed' after inserting in the statistical model,
 * thus the count is reinitialized.
 *
 * The array of values **must** be browsed in the time direction, the
 * timestamp must increase between an element and the next one.
 *
 * Returns a nanosec time based estimation of the earliest interrupt,
 * or U64_MAX otherwise.
 */
u64 irq_timings_next_event(u64 now)
{
        /*
         * This function must be called with the local irq disabled in
         * order to prevent the timings circular buffer from being
         * updated while we are reading it.
         */
        lockdep_assert_irqs_disabled();

        return 0;
}
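
/*
 * Illustrative sketch of a caller, for instance an idle-prediction
 * path deciding whether an interrupt is expected before a candidate
 * wakeup pays off. Only irq_timings_next_event(), local_clock() and
 * U64_MAX are real kernel symbols here; state_residency_ns and
 * irq_expected_soon are hypothetical names used for the example:
 *
 *      u64 now, next_irq;
 *      bool irq_expected_soon = false;
 *
 *      local_irq_disable();
 *      now = local_clock();
 *      next_irq = irq_timings_next_event(now);
 *      if (next_irq != U64_MAX)
 *              irq_expected_soon = next_irq < now + state_residency_ns;
 *      local_irq_enable();
 *
 * The local_irq_disable()/local_irq_enable() pair matches the lockdep
 * assertion above: the per-CPU circular buffer must not be updated
 * while it is being read.
 */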
void irq_timings_free(int irq)
{
        struct irqt_stat __percpu *s;

        s = idr_find(&irqt_stats, irq);
        if (s) {
                free_percpu(s);
                idr_remove(&irqt_stats, irq);
        }
}
int irq_timings_alloc(int irq)
{
        struct irqt_stat __percpu *s;
        int id;

        /*
         * Some platforms can have the same private interrupt per cpu,
         * so this function may be called several times with the
         * same interrupt number. Just bail out in case the per cpu
         * stat structure is already allocated.
         */
        s = idr_find(&irqt_stats, irq);
        if (s)
                return 0;

        s = alloc_percpu(*s);
        if (!s)
                return -ENOMEM;

        idr_preload(GFP_KERNEL);
        id = idr_alloc(&irqt_stats, s, irq, irq + 1, GFP_NOWAIT);
        idr_preload_end();

        if (id < 0) {
                free_percpu(s);
                return id;
        }

        return 0;
}
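
/*
 * Illustrative sketch of how the pair above is meant to be used around
 * an interrupt's lifetime. The setup/teardown function names below are
 * hypothetical; only irq_timings_alloc() and irq_timings_free() come
 * from this file:
 *
 *      int example_setup_irq(int irq)
 *      {
 *              int ret = irq_timings_alloc(irq);
 *
 *              if (ret < 0)
 *                      return ret;
 *
 *              // request / enable the interrupt here
 *              return 0;
 *      }
 *
 *      void example_teardown_irq(int irq)
 *      {
 *              // disable / release the interrupt here
 *              irq_timings_free(irq);
 *      }
 *
 * Because irq_timings_alloc() bails out when the per-CPU stats are
 * already present, calling it repeatedly for the same per-CPU private
 * interrupt number is harmless.
 */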