sched/clock: Use static key for sched_clock_running
sched_clock_running may be read every time sched_clock_cpu() is called. Yet this variable is updated only twice during boot and never changes again, so it is better to make it a static key.

Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: steven.sistare@oracle.com
Cc: daniel.m.jordan@oracle.com
Cc: linux@armlinux.org.uk
Cc: schwidefsky@de.ibm.com
Cc: heiko.carstens@de.ibm.com
Cc: john.stultz@linaro.org
Cc: sboyd@codeaurora.org
Cc: hpa@zytor.com
Cc: douly.fnst@cn.fujitsu.com
Cc: prarit@redhat.com
Cc: feng.tang@intel.com
Cc: pmladek@suse.com
Cc: gnomes@lxorguk.ukuu.org.uk
Cc: linux-s390@vger.kernel.org
Cc: boris.ostrovsky@oracle.com
Cc: jgross@suse.com
Cc: pbonzini@redhat.com
Link: https://lkml.kernel.org/r/20180719205545.16512-25-pasha.tatashin@oracle.com
commit 46457ea464
parent 857baa87b6
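For readers unfamiliar with the jump-label API the patch switches to, the basic pattern looks roughly like the sketch below. This is a minimal illustration, not code from the patch: my_feature_enabled, my_feature_init() and my_hot_path() are made-up names, and the snippet assumes it is built as kernel code with <linux/jump_label.h> available.

```c
#include <linux/jump_label.h>

/* Key starts out false: the hot-path branch is patched to fall through. */
static DEFINE_STATIC_KEY_FALSE(my_feature_enabled);

/* Called once during setup: runtime-patches every branch site. */
static void my_feature_init(void)
{
	static_branch_inc(&my_feature_enabled);
}

static int my_hot_path(void)
{
	/*
	 * Compiles to a patched nop/jump rather than a memory load and
	 * compare of a __read_mostly variable.
	 */
	if (!static_branch_unlikely(&my_feature_enabled))
		return 0;

	return 1;
}
```

Once the key has been flipped, the check in the hot path costs a single patched instruction instead of a load from memory, which is why a flag that is written twice at boot and then only read is a good candidate for a static key.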
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -67,7 +67,7 @@ unsigned long long __weak sched_clock(void)
 }
 EXPORT_SYMBOL_GPL(sched_clock);
 
-__read_mostly int sched_clock_running;
+static DEFINE_STATIC_KEY_FALSE(sched_clock_running);
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 /*
@@ -191,7 +191,7 @@ void clear_sched_clock_stable(void)
 
 	smp_mb(); /* matches sched_clock_init_late() */
 
-	if (sched_clock_running == 2)
+	if (static_key_count(&sched_clock_running.key) == 2)
 		__clear_sched_clock_stable();
 }
 
@@ -215,7 +215,7 @@ void __init sched_clock_init(void)
 	__sched_clock_gtod_offset();
 	local_irq_restore(flags);
 
-	sched_clock_running = 1;
+	static_branch_inc(&sched_clock_running);
 
 	/* Now that sched_clock_running is set adjust scd */
 	local_irq_save(flags);
@@ -228,7 +228,7 @@ void __init sched_clock_init(void)
  */
 static int __init sched_clock_init_late(void)
 {
-	sched_clock_running = 2;
+	static_branch_inc(&sched_clock_running);
 	/*
 	 * Ensure that it is impossible to not do a static_key update.
 	 *
@@ -373,7 +373,7 @@ u64 sched_clock_cpu(int cpu)
 	if (sched_clock_stable())
 		return sched_clock() + __sched_clock_offset;
 
-	if (unlikely(!sched_clock_running))
+	if (!static_branch_unlikely(&sched_clock_running))
 		return sched_clock();
 
 	preempt_disable_notrace();
@@ -396,7 +396,7 @@ void sched_clock_tick(void)
 	if (sched_clock_stable())
 		return;
 
-	if (unlikely(!sched_clock_running))
+	if (!static_branch_unlikely(&sched_clock_running))
 		return;
 
 	lockdep_assert_irqs_disabled();
@@ -455,13 +455,13 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
 void __init sched_clock_init(void)
 {
-	sched_clock_running = 1;
+	static_branch_inc(&sched_clock_running);
 	generic_sched_clock_init();
 }
 
 u64 sched_clock_cpu(int cpu)
 {
-	if (unlikely(!sched_clock_running))
+	if (!static_branch_unlikely(&sched_clock_running))
 		return 0;
 
 	return sched_clock();
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -623,8 +623,6 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
 #undef PU
 }
 
-extern __read_mostly int sched_clock_running;
-
 static void print_cpu(struct seq_file *m, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
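One detail worth noting in the diff above: sched_clock_init() and sched_clock_init_late() each call static_branch_inc(), so the key's reference count takes over the old 0/1/2 values of the integer, and clear_sched_clock_stable() reads that count back via static_key_count(&sched_clock_running.key). A minimal sketch of the same idea, with a hypothetical clock_state key and made-up helper names, under the same kernel-code assumptions as the earlier snippet:

```c
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(clock_state);

static void early_init(void)
{
	static_branch_inc(&clock_state);	/* count 0 -> 1, branch enabled */
}

static void late_init(void)
{
	static_branch_inc(&clock_state);	/* count 1 -> 2 */
}

static bool late_init_done(void)
{
	/* Peek at the reference count behind the static branch. */
	return static_key_count(&clock_state.key) == 2;
}
```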