2007-02-16 17:28:03 +08:00
|
|
|
/*
|
|
|
|
* linux/kernel/time/tick-sched.c
|
|
|
|
*
|
|
|
|
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
|
|
|
|
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
|
|
|
|
* Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
|
|
|
|
*
|
|
|
|
* No idle tick implementation for low and high resolution timers
|
|
|
|
*
|
|
|
|
* Started by: Thomas Gleixner and Ingo Molnar
|
|
|
|
*
|
2008-01-30 20:30:00 +08:00
|
|
|
* Distribute under GPLv2.
|
2007-02-16 17:28:03 +08:00
|
|
|
*/
|
|
|
|
#include <linux/cpu.h>
|
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/hrtimer.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/kernel_stat.h>
|
|
|
|
#include <linux/percpu.h>
|
2017-02-09 01:51:31 +08:00
|
|
|
#include <linux/nmi.h>
|
2007-02-16 17:28:03 +08:00
|
|
|
#include <linux/profile.h>
|
2017-02-09 01:51:30 +08:00
|
|
|
#include <linux/sched/signal.h>
|
2017-02-01 23:36:40 +08:00
|
|
|
#include <linux/sched/clock.h>
|
2017-02-09 01:51:35 +08:00
|
|
|
#include <linux/sched/stat.h>
|
2017-02-09 01:51:35 +08:00
|
|
|
#include <linux/sched/nohz.h>
|
2008-08-05 02:59:11 +08:00
|
|
|
#include <linux/module.h>
|
2012-11-08 04:03:07 +08:00
|
|
|
#include <linux/irq_work.h>
|
2013-04-20 21:43:57 +08:00
|
|
|
#include <linux/posix-timers.h>
|
2013-07-10 06:55:25 +08:00
|
|
|
#include <linux/context_tracking.h>
|
2017-08-29 21:07:54 +08:00
|
|
|
#include <linux/mm.h>
|
2007-02-16 17:28:03 +08:00
|
|
|
|
2007-02-25 14:10:13 +08:00
|
|
|
#include <asm/irq_regs.h>
|
|
|
|
|
2007-02-16 17:28:03 +08:00
|
|
|
#include "tick-internal.h"
|
|
|
|
|
2013-04-20 23:35:50 +08:00
|
|
|
#include <trace/events/timer.h>
|
|
|
|
|
2007-02-16 17:28:03 +08:00
|
|
|
/*
 * Per-CPU nohz control structure
 *
 * Holds the dynticks/idle bookkeeping for each CPU; accessed via
 * this_cpu_ptr()/per_cpu() throughout this file.
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
|
2007-02-16 17:28:03 +08:00
|
|
|
|
[PATCH] Add debugging feature /proc/timer_list
add /proc/timer_list, which prints all currently pending (high-res) timers,
all clock-event sources and their parameters in a human-readable form.
Sample output:
Timer List Version: v0.1
HRTIMER_MAX_CLOCK_BASES: 2
now at 4246046273872 nsecs
cpu: 0
clock 0:
.index: 0
.resolution: 1 nsecs
.get_time: ktime_get_real
.offset: 1273998312645738432 nsecs
active timers:
clock 1:
.index: 1
.resolution: 1 nsecs
.get_time: ktime_get
.offset: 0 nsecs
active timers:
#0: <f5a90ec8>, hrtimer_sched_tick, hrtimer_stop_sched_tick, swapper/0
# expires at 4246432689566 nsecs [in 386415694 nsecs]
#1: <f5a90ec8>, hrtimer_wakeup, do_nanosleep, pcscd/2050
# expires at 4247018194689 nsecs [in 971920817 nsecs]
#2: <f5a90ec8>, hrtimer_wakeup, do_nanosleep, irqbalance/1909
# expires at 4247351358392 nsecs [in 1305084520 nsecs]
#3: <f5a90ec8>, hrtimer_wakeup, do_nanosleep, crond/2157
# expires at 4249097614968 nsecs [in 3051341096 nsecs]
#4: <f5a90ec8>, it_real_fn, do_setitimer, syslogd/1888
# expires at 4251329900926 nsecs [in 5283627054 nsecs]
.expires_next : 4246432689566 nsecs
.hres_active : 1
.check_clocks : 0
.nr_events : 31306
.idle_tick : 4246020791890 nsecs
.tick_stopped : 1
.idle_jiffies : 986504
.idle_calls : 40700
.idle_sleeps : 36014
.idle_entrytime : 4246019418883 nsecs
.idle_sleeptime : 4178181972709 nsecs
cpu: 1
clock 0:
.index: 0
.resolution: 1 nsecs
.get_time: ktime_get_real
.offset: 1273998312645738432 nsecs
active timers:
clock 1:
.index: 1
.resolution: 1 nsecs
.get_time: ktime_get
.offset: 0 nsecs
active timers:
#0: <f5a90ec8>, hrtimer_sched_tick, hrtimer_restart_sched_tick, swapper/0
# expires at 4246050084568 nsecs [in 3810696 nsecs]
#1: <f5a90ec8>, hrtimer_wakeup, do_nanosleep, atd/2227
# expires at 4261010635003 nsecs [in 14964361131 nsecs]
#2: <f5a90ec8>, hrtimer_wakeup, do_nanosleep, smartd/2332
# expires at 5469485798970 nsecs [in 1223439525098 nsecs]
.expires_next : 4246050084568 nsecs
.hres_active : 1
.check_clocks : 0
.nr_events : 24043
.idle_tick : 4246046084568 nsecs
.tick_stopped : 0
.idle_jiffies : 986510
.idle_calls : 26360
.idle_sleeps : 22551
.idle_entrytime : 4246043874339 nsecs
.idle_sleeptime : 4170763761184 nsecs
tick_broadcast_mask: 00000003
event_broadcast_mask: 00000001
CPU#0's local event device:
Clock Event Device: lapic
capabilities: 0000000e
max_delta_ns: 807385544
min_delta_ns: 1443
mult: 44624025
shift: 32
set_next_event: lapic_next_event
set_mode: lapic_timer_setup
event_handler: hrtimer_interrupt
.installed: 1
.expires: 4246432689566 nsecs
CPU#1's local event device:
Clock Event Device: lapic
capabilities: 0000000e
max_delta_ns: 807385544
min_delta_ns: 1443
mult: 44624025
shift: 32
set_next_event: lapic_next_event
set_mode: lapic_timer_setup
event_handler: hrtimer_interrupt
.installed: 1
.expires: 4246050084568 nsecs
Clock Event Device: hpet
capabilities: 00000007
max_delta_ns: 2147483647
min_delta_ns: 3352
mult: 61496110
shift: 32
set_next_event: hpet_next_event
set_mode: hpet_set_mode
event_handler: handle_nextevt_broadcast
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-02-16 17:28:15 +08:00
|
|
|
struct tick_sched *tick_get_tick_sched(int cpu)
|
|
|
|
{
|
|
|
|
return &per_cpu(tick_cpu_sched, cpu);
|
|
|
|
}
|
|
|
|
|
2016-01-25 23:41:49 +08:00
|
|
|
#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
|
|
|
|
/*
 * The time, when the last jiffy update happened. Protected by jiffies_lock.
 */
static ktime_t last_jiffies_update;
|
|
|
|
|
2007-02-16 17:28:03 +08:00
|
|
|
/*
 * tick_do_update_jiffies64 - advance jiffies to account for elapsed time.
 * @now: current time
 *
 * Advances last_jiffies_update / tick_next_period and calls do_timer()
 * with the number of elapsed ticks. Must be called with interrupts
 * disabled! All updates are serialized by jiffies_lock.
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding jiffies_lock:
	 * nothing to do if less than a tick period has elapsed.
	 */
	delta = ktime_sub(now, last_jiffies_update);
	if (delta < tick_period)
		return;

	/* Reevaluate with jiffies_lock held */
	write_seqlock(&jiffies_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta >= tick_period) {

		/* Fast path: exactly one tick elapsed (common case). */
		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta >= tick_period)) {
			s64 incr = ktime_to_ns(tick_period);

			/* How many additional whole periods elapsed. */
			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
		/* +1 accounts for the fast-path tick consumed above. */
		do_timer(++ticks);

		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	} else {
		/* Another CPU beat us to the update; nothing to do. */
		write_sequnlock(&jiffies_lock);
		return;
	}
	write_sequnlock(&jiffies_lock);
	update_wall_time();
}
|
|
|
|
|
|
|
|
/*
 * Initialize and return retrieve the jiffies update.
 *
 * On first use, seed last_jiffies_update from tick_next_period so the
 * periodic accounting starts from a well-defined point. Returns the
 * (possibly just initialized) last jiffies update time.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	write_seqlock(&jiffies_lock);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_sequnlock(&jiffies_lock);
	return period;
}
|
|
|
|
|
2012-10-15 08:03:27 +08:00
|
|
|
|
|
|
|
/*
 * tick_sched_do_timer - take over / perform the do_timer() duty if needed.
 * @now: current time, forwarded to tick_do_update_jiffies64()
 */
static void tick_sched_do_timer(ktime_t now)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the CPU in charge went
	 * into a long sleep. If two CPUs happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * jiffies_lock.
	 *
	 * nohz_full CPUs never pick up the duty; housekeeping CPUs
	 * handle it on their behalf.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)
	    && !tick_nohz_full_cpu(cpu))
		tick_do_timer_cpu = cpu;
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);
}
|
|
|
|
|
2012-10-15 08:43:03 +08:00
|
|
|
/*
 * tick_sched_handle - per-tick bookkeeping common to lowres and highres.
 * @ts:   this CPU's tick_sched state
 * @regs: interrupted register state, used to attribute user vs system time
 */
static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start of
	 * idle" jiffy stamp so the idle accounting adjustment we do
	 * when we go busy again does not account too much ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog_sched();
		if (is_idle_task(current))
			ts->idle_jiffies++;
		/*
		 * In case the current tick fired too early past its expected
		 * expiration, make sure we don't bypass the next clock reprogramming
		 * to the same deadline.
		 */
		ts->next_tick = 0;
	}
#endif
	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);
}
|
2016-01-25 23:41:49 +08:00
|
|
|
#endif
|
2012-10-15 08:43:03 +08:00
|
|
|
|
2013-04-12 22:45:34 +08:00
|
|
|
#ifdef CONFIG_NO_HZ_FULL
|
2013-07-25 05:52:27 +08:00
|
|
|
/* CPUs configured for full dynticks (from the "nohz_full=" boot parameter). */
cpumask_var_t tick_nohz_full_mask;
/* True once a usable nohz_full CPU set has been installed at boot. */
bool tick_nohz_full_running;
/* Global tick dependency bits; any set bit keeps the tick running everywhere. */
static atomic_t tick_dep_mask;
|
2012-12-19 00:32:19 +08:00
|
|
|
|
2016-03-24 22:38:00 +08:00
|
|
|
/*
 * check_tick_dependency - test whether any tick dependency bit is set.
 * @dep: dependency mask to inspect
 *
 * Emits a tick_stop trace event for the first dependency found and
 * returns true; returns false when no dependency forbids stopping the
 * tick. Checked in the same order as the original if-chain:
 * posix timers, perf events, scheduler, unstable clock.
 */
static bool check_tick_dependency(atomic_t *dep)
{
	static const unsigned int dep_masks[] = {
		TICK_DEP_MASK_POSIX_TIMER,
		TICK_DEP_MASK_PERF_EVENTS,
		TICK_DEP_MASK_SCHED,
		TICK_DEP_MASK_CLOCK_UNSTABLE,
	};
	unsigned int i;
	int val = atomic_read(dep);

	for (i = 0; i < sizeof(dep_masks) / sizeof(dep_masks[0]); i++) {
		if (val & dep_masks[i]) {
			trace_tick_stop(0, dep_masks[i]);
			return true;
		}
	}

	return false;
}
|
|
|
|
|
2016-09-07 18:51:13 +08:00
|
|
|
/*
 * can_stop_full_tick - check whether a full-dynticks CPU may stop its tick.
 * @cpu: the CPU being evaluated
 * @ts:  its tick_sched state
 *
 * Returns false if the CPU is offline or if any global, per-CPU,
 * per-task or per-signal tick dependency is active. Must be called
 * with interrupts disabled.
 */
static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
	lockdep_assert_irqs_disabled();

	if (unlikely(!cpu_online(cpu)))
		return false;

	/* Global dependencies (perf freq events, unstable clock, ...). */
	if (check_tick_dependency(&tick_dep_mask))
		return false;

	/* Per-CPU dependencies (scheduler, perf throttling, ...). */
	if (check_tick_dependency(&ts->tick_dep_mask))
		return false;

	/* Per-task dependencies (posix CPU timers, ...). */
	if (check_tick_dependency(&current->tick_dep_mask))
		return false;

	/* Per-process (signal struct) dependencies. */
	if (check_tick_dependency(&current->signal->tick_dep_mask))
		return false;

	return true;
}
|
|
|
|
|
2015-06-07 21:54:30 +08:00
|
|
|
/*
 * irq_work callback for the nohz-full kick. The work itself does nothing:
 * merely raising the interrupt forces the target CPU through the irq-exit
 * path, which re-evaluates the tick.
 */
static void nohz_full_kick_func(struct irq_work *work)
{
	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
}

/* Per-CPU irq_work used to kick a CPU into re-evaluating its tick. */
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
	.func = nohz_full_kick_func,
};
|
|
|
|
|
2014-08-14 00:50:16 +08:00
|
|
|
/*
 * Kick this CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 * is NMI safe.
 */
static void tick_nohz_full_kick(void)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	/* Self-queued irq_work is NMI safe, unlike the remote variant. */
	irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}
|
|
|
|
|
2013-04-18 06:15:40 +08:00
|
|
|
/*
 * Kick the CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 * Not NMI safe (uses remote irq_work queueing).
 */
void tick_nohz_full_kick_cpu(int cpu)
{
	if (!tick_nohz_full_cpu(cpu))
		return;

	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}
|
|
|
|
|
|
|
|
/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */
static void tick_nohz_full_kick_all(void)
{
	int cpu;

	if (!tick_nohz_full_running)
		return;

	/* Disable preemption so the online mask stays stable while kicking. */
	preempt_disable();
	for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}
|
|
|
|
|
2016-03-24 22:38:00 +08:00
|
|
|
/*
 * tick_nohz_dep_set_all - set a dependency bit and kick all CPUs if it
 * was previously clear.
 * @dep: dependency mask to update
 * @bit: dependency bit to set
 *
 * Only the first setter (prev == 0) triggers the kick; subsequent
 * setters find the tick already forced on.
 */
static void tick_nohz_dep_set_all(atomic_t *dep,
				  enum tick_dep_bits bit)
{
	int prev;

	prev = atomic_fetch_or(BIT(bit), dep);
	if (!prev)
		tick_nohz_full_kick_all();
}
|
|
|
|
|
|
|
|
/*
 * Set a global tick dependency. Used by perf events that rely on freq and
 * by unstable clock.
 */
void tick_nohz_dep_set(enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&tick_dep_mask, bit);
}
|
|
|
|
|
|
|
|
/* Clear a global tick dependency bit (no kick needed on clear). */
void tick_nohz_dep_clear(enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tick_dep_mask);
}
|
|
|
|
|
|
|
|
/*
 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
 * manage events throttling.
 */
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	int prev;
	struct tick_sched *ts;

	ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
	/* Only the first setter needs to kick the target CPU. */
	if (!prev) {
		preempt_disable();
		/* Perf needs local kick that is NMI safe */
		if (cpu == smp_processor_id()) {
			tick_nohz_full_kick();
		} else {
			/* Remote irq work not NMI-safe */
			if (!WARN_ON_ONCE(in_nmi()))
				tick_nohz_full_kick_cpu(cpu);
		}
		preempt_enable();
	}
}
|
|
|
|
|
|
|
|
/* Clear a per-CPU tick dependency bit (no kick needed on clear). */
void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}
|
|
|
|
|
|
|
|
/*
 * Set a per-task tick dependency. Posix CPU timers need this in order to elapse
 * per task timers.
 */
void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	/*
	 * We could optimize this with just kicking the target running the task
	 * if that noise matters for nohz full users.
	 */
	tick_nohz_dep_set_all(&tsk->tick_dep_mask, bit);
}
|
|
|
|
|
|
|
|
/* Clear a per-task tick dependency bit (no kick needed on clear). */
void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}
|
|
|
|
|
|
|
|
/*
 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse
 * per process timers.
 */
void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&sig->tick_dep_mask, bit);
}
|
|
|
|
|
|
|
|
/* Clear a per-process (signal struct) tick dependency bit. */
void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}
|
|
|
|
|
2013-04-20 23:11:50 +08:00
|
|
|
/*
|
|
|
|
* Re-evaluate the need for the tick as we switch the current task.
|
|
|
|
* It might need the tick due to per task/process properties:
|
2016-07-01 18:42:35 +08:00
|
|
|
* perf events, posix CPU timers, ...
|
2013-04-20 23:11:50 +08:00
|
|
|
*/
|
2015-06-12 00:07:12 +08:00
|
|
|
void __tick_nohz_task_switch(void)
|
2013-04-20 23:11:50 +08:00
|
|
|
{
|
|
|
|
unsigned long flags;
|
2015-06-07 21:54:30 +08:00
|
|
|
struct tick_sched *ts;
|
2013-04-20 23:11:50 +08:00
|
|
|
|
|
|
|
local_irq_save(flags);
|
|
|
|
|
2013-04-28 11:25:58 +08:00
|
|
|
if (!tick_nohz_full_cpu(smp_processor_id()))
|
|
|
|
goto out;
|
|
|
|
|
2015-06-07 21:54:30 +08:00
|
|
|
ts = this_cpu_ptr(&tick_cpu_sched);
|
2013-04-20 23:11:50 +08:00
|
|
|
|
2015-06-07 21:54:30 +08:00
|
|
|
if (ts->tick_stopped) {
|
2016-03-24 22:38:00 +08:00
|
|
|
if (atomic_read(¤t->tick_dep_mask) ||
|
|
|
|
atomic_read(¤t->signal->tick_dep_mask))
|
2015-06-07 21:54:30 +08:00
|
|
|
tick_nohz_full_kick();
|
|
|
|
}
|
2013-04-28 11:25:58 +08:00
|
|
|
out:
|
2013-04-20 23:11:50 +08:00
|
|
|
local_irq_restore(flags);
|
|
|
|
}
|
|
|
|
|
2017-10-27 10:42:36 +08:00
|
|
|
/* Get the boot-time nohz CPU list from the kernel parameters. */
void __init tick_nohz_full_setup(cpumask_var_t cpumask)
{
	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
	cpumask_copy(tick_nohz_full_mask, cpumask);
	tick_nohz_full_running = true;
}
|
|
|
|
|
2016-11-18 02:35:34 +08:00
|
|
|
/*
 * CPU-hotplug "predown" callback: veto offlining of the timekeeping CPU.
 * Returns -EBUSY to block the hot-unplug, 0 to allow it.
 */
static int tick_nohz_cpu_down(unsigned int cpu)
{
	/*
	 * The boot CPU handles housekeeping duty (unbound timers,
	 * workqueues, timekeeping, ...) on behalf of full dynticks
	 * CPUs. It must remain online when nohz full is enabled.
	 */
	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
		return -EBUSY;
	return 0;
}
|
|
|
|
|
2013-03-27 06:47:24 +08:00
|
|
|
/*
 * tick_nohz_init - boot-time initialization of full dynticks mode.
 *
 * Validates the nohz_full mask (irq-work self-IPI support, reserving the
 * boot CPU for timekeeping), enables context tracking on the chosen CPUs
 * and registers the hotplug veto callback.
 */
void __init tick_nohz_init(void)
{
	int cpu, ret;

	if (!tick_nohz_full_running)
		return;

	/*
	 * Full dynticks uses irq work to drive the tick rescheduling on safe
	 * locking contexts. But then we need irq work to raise its own
	 * interrupts to avoid circular dependency on the tick
	 */
	if (!arch_irq_work_has_interrupt()) {
		pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
		cpumask_clear(tick_nohz_full_mask);
		tick_nohz_full_running = false;
		return;
	}

	/* The boot CPU keeps the timekeeping duty: exclude it from the mask. */
	cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
		pr_warn("NO_HZ: Clearing %d from nohz_full range for timekeeping\n",
			cpu);
		cpumask_clear_cpu(cpu, tick_nohz_full_mask);
	}

	for_each_cpu(cpu, tick_nohz_full_mask)
		context_tracking_cpu_set(cpu);

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"kernel/nohz:predown", NULL,
					tick_nohz_cpu_down);
	WARN_ON(ret < 0);
	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
		cpumask_pr_args(tick_nohz_full_mask));
}
|
|
|
|
#endif
|
|
|
|
|
2007-02-16 17:28:03 +08:00
|
|
|
/*
|
|
|
|
* NOHZ - aka dynamic tick functionality
|
|
|
|
*/
|
2011-08-11 05:21:01 +08:00
|
|
|
#ifdef CONFIG_NO_HZ_COMMON
|
2007-02-16 17:28:03 +08:00
|
|
|
/*
 * NO HZ enabled ?
 */
/* Requested state from the "nohz=" boot parameter (default: on). */
bool tick_nohz_enabled __read_mostly  = true;
/* Actually-active state, set once the dynticks machinery is switched on. */
unsigned long tick_nohz_active  __read_mostly;
|
2007-02-16 17:28:03 +08:00
|
|
|
/*
 * Enable / Disable tickless mode
 *
 * Parses the "nohz=" boot parameter into tick_nohz_enabled;
 * returns nonzero (handled) only when the value parsed cleanly.
 */
static int __init setup_tick_nohz(char *str)
{
	return (kstrtobool(str, &tick_nohz_enabled) == 0);
}

__setup("nohz=", setup_tick_nohz);
|
|
|
|
|
2018-02-21 12:17:24 +08:00
|
|
|
bool tick_nohz_tick_stopped(void)
|
2015-03-25 20:07:37 +08:00
|
|
|
{
|
|
|
|
return __this_cpu_read(tick_cpu_sched.tick_stopped);
|
|
|
|
}
|
|
|
|
|
2018-02-21 12:17:25 +08:00
|
|
|
bool tick_nohz_tick_stopped_cpu(int cpu)
|
|
|
|
{
|
|
|
|
struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
|
|
|
|
|
|
|
|
return ts->tick_stopped;
|
|
|
|
}
|
|
|
|
|
2007-02-16 17:28:03 +08:00
|
|
|
/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any CPU, as we don't know whether the
 * CPU, which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	unsigned long flags;

	/* Record when this CPU last woke from idle, for accounting. */
	__this_cpu_write(tick_cpu_sched.idle_waketime, now);

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog_sched();
}
|
|
|
|
|
2010-05-09 23:22:45 +08:00
|
|
|
/*
 * Updates the per-CPU time idle statistics counters
 *
 * Folds the time since idle_entrytime into either iowait_sleeptime or
 * idle_sleeptime (depending on whether tasks are blocked on iowait on
 * this CPU) and restarts the idle measurement interval. Optionally
 * reports @now in microseconds via @last_update_time.
 */
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
	ktime_t delta;

	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		if (nr_iowait_cpu(cpu) > 0)
			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
		else
			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_entrytime = now;
	}

	if (last_update_time)
		*last_update_time = ktime_to_us(now);

}
|
|
|
|
|
2013-08-08 04:28:01 +08:00
|
|
|
/*
 * tick_nohz_stop_idle - end the idle period, accounting the elapsed time.
 * @ts:  this CPU's tick_sched state
 * @now: current time
 */
static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
	update_ts_time_stats(smp_processor_id(), ts, now, NULL);
	ts->idle_active = 0;

	/* Let sched_clock know we left a potentially long no-tick period. */
	sched_clock_idle_wakeup_event();
}
|
|
|
|
|
2013-08-08 04:28:01 +08:00
|
|
|
/*
 * tick_nohz_start_idle - begin an idle period.
 * @ts: this CPU's tick_sched state
 *
 * Stamps the idle entry time, marks idle accounting active and notifies
 * sched_clock. Returns the current time.
 */
static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
{
	ktime_t now = ktime_get();

	ts->idle_entrytime = now;
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
	return now;
}
|
|
|
|
|
2010-05-09 23:22:08 +08:00
|
|
|
/**
 * get_cpu_idle_time_us - get the total idle time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, idle;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		/* Caller wants the counters folded up to @now as well. */
		update_ts_time_stats(cpu, ts, now, last_update_time);
		idle = ts->idle_sleeptime;
	} else {
		/*
		 * Read-only path: add the in-flight idle interval without
		 * touching the stored counters.
		 */
		if (ts->idle_active && !nr_iowait_cpu(cpu)) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(ts->idle_sleeptime, delta);
		} else {
			idle = ts->idle_sleeptime;
		}
	}

	return ktime_to_us(idle);

}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
|
2008-01-30 20:30:04 +08:00
|
|
|
|
2011-08-24 15:37:48 +08:00
|
|
|
/**
 * get_cpu_iowait_time_us - get the total iowait time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, iowait;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		/* Caller wants the counters folded up to @now as well. */
		update_ts_time_stats(cpu, ts, now, last_update_time);
		iowait = ts->iowait_sleeptime;
	} else {
		/*
		 * Read-only path: add the in-flight iowait interval without
		 * touching the stored counters.
		 */
		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			iowait = ktime_add(ts->iowait_sleeptime, delta);
		} else {
			iowait = ts->iowait_sleeptime;
		}
	}

	return ktime_to_us(iowait);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
|
|
|
|
|
2015-04-15 05:08:54 +08:00
|
|
|
/*
 * Restart the tick after it has been stopped: rearm ts->sched_timer
 * starting from the deadline of the last tick and forwarded past @now,
 * so the next expiry lies in the future on a tick-period boundary.
 */
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	/* Stop any pending instance before reprogramming the expiry */
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

	/* Forward the time to expire in the future */
	hrtimer_forward(&ts->sched_timer, now, tick_period);

	/*
	 * In highres mode the sched_timer is a real hrtimer; in lowres
	 * mode it only carries the expiry value and the clockevent
	 * device has to be programmed directly.
	 */
	if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
		hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
	else
		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);

	/*
	 * Reset to make sure next tick stop doesn't get fooled by past
	 * cached clock deadline.
	 */
	ts->next_tick = 0;
}
|
|
|
|
|
2017-12-22 22:51:13 +08:00
|
|
|
static inline bool local_timer_softirq_pending(void)
|
|
|
|
{
|
|
|
|
return local_softirq_pending() & TIMER_SOFTIRQ;
|
|
|
|
}
|
|
|
|
|
2011-08-01 07:25:38 +08:00
|
|
|
static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
|
|
|
|
ktime_t now, int cpu)
|
2007-02-16 17:28:03 +08:00
|
|
|
{
|
2014-08-18 01:30:25 +08:00
|
|
|
struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
|
2015-04-15 05:08:58 +08:00
|
|
|
u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
|
|
|
|
unsigned long seq, basejiff;
|
|
|
|
ktime_t tick;
|
2013-12-17 07:16:37 +08:00
|
|
|
|
2007-02-16 17:28:03 +08:00
|
|
|
/* Read jiffies and the time when jiffies were updated last */
|
|
|
|
do {
|
2012-02-29 08:50:11 +08:00
|
|
|
seq = read_seqbegin(&jiffies_lock);
|
2016-12-25 18:38:40 +08:00
|
|
|
basemono = last_jiffies_update;
|
2015-04-15 05:08:58 +08:00
|
|
|
basejiff = jiffies;
|
2012-02-29 08:50:11 +08:00
|
|
|
} while (read_seqretry(&jiffies_lock, seq));
|
2015-04-15 05:08:58 +08:00
|
|
|
ts->last_jiffies = basejiff;
|
2007-02-16 17:28:03 +08:00
|
|
|
|
2017-12-22 22:51:13 +08:00
|
|
|
/*
|
|
|
|
* Keep the periodic tick, when RCU, architecture or irq_work
|
|
|
|
* requests it.
|
|
|
|
* Aside of that check whether the local timer softirq is
|
|
|
|
* pending. If so its a bad idea to call get_next_timer_interrupt()
|
|
|
|
* because there is an already expired timer, so it will request
|
|
|
|
 * immediate expiry, which rearms the hardware timer with a
|
|
|
|
* minimal delta which brings us back to this place
|
|
|
|
* immediately. Lather, rinse and repeat...
|
|
|
|
*/
|
|
|
|
if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
|
|
|
|
irq_work_needs_cpu() || local_timer_softirq_pending()) {
|
2015-04-15 05:08:58 +08:00
|
|
|
next_tick = basemono + TICK_NSEC;
|
2009-09-29 20:25:16 +08:00
|
|
|
} else {
|
2015-04-15 05:08:58 +08:00
|
|
|
/*
|
|
|
|
* Get the next pending timer. If high resolution
|
|
|
|
* timers are enabled this only takes the timer wheel
|
|
|
|
* timers into account. If high resolution timers are
|
|
|
|
* disabled this also looks at the next expiring
|
|
|
|
* hrtimer.
|
|
|
|
*/
|
|
|
|
next_tmr = get_next_timer_interrupt(basejiff, basemono);
|
|
|
|
ts->next_timer = next_tmr;
|
|
|
|
/* Take the next rcu event into account */
|
|
|
|
next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
|
2009-09-29 20:25:16 +08:00
|
|
|
}
|
2013-04-26 16:05:59 +08:00
|
|
|
|
2015-04-15 05:08:58 +08:00
|
|
|
/*
|
|
|
|
* If the tick is due in the next period, keep it ticking or
|
2015-11-20 00:21:06 +08:00
|
|
|
* force prod the timer.
|
2015-04-15 05:08:58 +08:00
|
|
|
*/
|
|
|
|
delta = next_tick - basemono;
|
|
|
|
if (delta <= (u64)TICK_NSEC) {
|
2016-07-04 17:50:36 +08:00
|
|
|
/*
|
|
|
|
* Tell the timer code that the base is not idle, i.e. undo
|
|
|
|
* the effect of get_next_timer_interrupt():
|
|
|
|
*/
|
|
|
|
timer_clear_idle();
|
2015-11-20 00:21:06 +08:00
|
|
|
/*
|
|
|
|
* We've not stopped the tick yet, and there's a timer in the
|
|
|
|
* next period, so no point in stopping it either, bail.
|
|
|
|
*/
|
2017-06-01 22:47:09 +08:00
|
|
|
if (!ts->tick_stopped) {
|
|
|
|
tick = 0;
|
2015-04-15 05:08:56 +08:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-02-16 17:28:03 +08:00
|
|
|
/*
|
2016-07-01 18:42:35 +08:00
|
|
|
* If this CPU is the one which updates jiffies, then give up
|
|
|
|
* the assignment and let it be taken by the CPU which runs
|
|
|
|
* the tick timer next, which might be this CPU as well. If we
|
2015-04-15 05:08:56 +08:00
|
|
|
* don't drop this here the jiffies might be stale and
|
|
|
|
* do_timer() never invoked. Keep track of the fact that it
|
2016-07-01 18:42:35 +08:00
|
|
|
* was the one which had the do_timer() duty last. If this CPU
|
2015-04-15 05:08:56 +08:00
|
|
|
* is the one which had the do_timer() duty last, we limit the
|
2016-06-29 12:51:50 +08:00
|
|
|
* sleep time to the timekeeping max_deferment value.
|
2015-04-15 05:08:58 +08:00
|
|
|
* Otherwise we can sleep as long as we want.
|
2007-02-16 17:28:03 +08:00
|
|
|
*/
|
2015-04-15 05:08:58 +08:00
|
|
|
delta = timekeeping_max_deferment();
|
2015-04-15 05:08:56 +08:00
|
|
|
if (cpu == tick_do_timer_cpu) {
|
|
|
|
tick_do_timer_cpu = TICK_DO_TIMER_NONE;
|
|
|
|
ts->do_timer_last = 1;
|
|
|
|
} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
|
2015-04-15 05:08:58 +08:00
|
|
|
delta = KTIME_MAX;
|
2015-04-15 05:08:56 +08:00
|
|
|
ts->do_timer_last = 0;
|
|
|
|
} else if (!ts->do_timer_last) {
|
2015-04-15 05:08:58 +08:00
|
|
|
delta = KTIME_MAX;
|
2015-04-15 05:08:56 +08:00
|
|
|
}
|
2009-11-13 05:12:06 +08:00
|
|
|
|
2015-04-15 05:08:58 +08:00
|
|
|
/* Calculate the next expiry time */
|
|
|
|
if (delta < (KTIME_MAX - basemono))
|
|
|
|
expires = basemono + delta;
|
2015-04-15 05:08:56 +08:00
|
|
|
else
|
2015-04-15 05:08:58 +08:00
|
|
|
expires = KTIME_MAX;
|
|
|
|
|
|
|
|
expires = min_t(u64, expires, next_tick);
|
2016-12-25 18:38:40 +08:00
|
|
|
tick = expires;
|
2008-12-02 06:18:11 +08:00
|
|
|
|
2015-04-15 05:08:56 +08:00
|
|
|
/* Skip reprogram of event if its not changed */
|
nohz: Fix collision between tick and other hrtimers, again
This restores commit:
24b91e360ef5: ("nohz: Fix collision between tick and other hrtimers")
... which got reverted by commit:
558e8e27e73f: ('Revert "nohz: Fix collision between tick and other hrtimers"')
... due to a regression where CPUs spuriously stopped ticking.
The bug happened when a tick fired too early past its expected expiration:
on IRQ exit the tick was scheduled again to the same deadline but skipped
reprogramming because ts->next_tick still kept in cache the deadline.
This has been fixed now with resetting ts->next_tick from the tick
itself. Extra care has also been taken to prevent from obsolete values
throughout CPU hotplug operations.
When the tick is stopped and an interrupt occurs afterward, we check on
that interrupt exit if the next tick needs to be rescheduled. If it
doesn't need any update, we don't want to do anything.
In order to check if the tick needs an update, we compare it against the
clockevent device deadline. Now that's a problem because the clockevent
device is at a lower level than the tick itself if it is implemented
on top of hrtimer.
Every hrtimer share this clockevent device. So comparing the next tick
deadline against the clockevent device deadline is wrong because the
device may be programmed for another hrtimer whose deadline collides
with the tick. As a result we may end up not reprogramming the tick
accidentally.
In a worst case scenario under full dynticks mode, the tick stops firing
as it is supposed to every 1hz, leaving /proc/stat stalled:
Task in a full dynticks CPU
----------------------------
* hrtimer A is queued 2 seconds ahead
* the tick is stopped, scheduled 1 second ahead
* tick fires 1 second later
* on tick exit, nohz schedules the tick 1 second ahead but sees
the clockevent device is already programmed to that deadline,
fooled by hrtimer A, the tick isn't rescheduled.
* hrtimer A is cancelled before its deadline
* tick never fires again until an interrupt happens...
In order to fix this, store the next tick deadline to the tick_sched
local structure and reuse that value later to check whether we need to
reprogram the clock after an interrupt.
On the other hand, ts->sleep_length still wants to know about the next
clock event and not just the tick, so we want to improve the related
comment to avoid confusion.
Reported-and-tested-by: Tim Wright <tim@binbash.co.uk>
Reported-and-tested-by: Pavel Machek <pavel@ucw.cz>
Reported-by: James Hartsock <hartsjc@redhat.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/1492783255-5051-2-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-04-21 22:00:54 +08:00
|
|
|
if (ts->tick_stopped && (expires == ts->next_tick)) {
|
|
|
|
/* Sanity check: make sure clockevent is actually programmed */
|
2017-06-13 12:04:14 +08:00
|
|
|
if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
|
2017-05-11 22:36:19 +08:00
|
|
|
goto out;
|
nohz: Fix collision between tick and other hrtimers, again
This restores commit:
24b91e360ef5: ("nohz: Fix collision between tick and other hrtimers")
... which got reverted by commit:
558e8e27e73f: ('Revert "nohz: Fix collision between tick and other hrtimers"')
... due to a regression where CPUs spuriously stopped ticking.
The bug happened when a tick fired too early past its expected expiration:
on IRQ exit the tick was scheduled again to the same deadline but skipped
reprogramming because ts->next_tick still kept in cache the deadline.
This has been fixed now with resetting ts->next_tick from the tick
itself. Extra care has also been taken to prevent from obsolete values
throughout CPU hotplug operations.
When the tick is stopped and an interrupt occurs afterward, we check on
that interrupt exit if the next tick needs to be rescheduled. If it
doesn't need any update, we don't want to do anything.
In order to check if the tick needs an update, we compare it against the
clockevent device deadline. Now that's a problem because the clockevent
device is at a lower level than the tick itself if it is implemented
on top of hrtimer.
Every hrtimer share this clockevent device. So comparing the next tick
deadline against the clockevent device deadline is wrong because the
device may be programmed for another hrtimer whose deadline collides
with the tick. As a result we may end up not reprogramming the tick
accidentally.
In a worst case scenario under full dynticks mode, the tick stops firing
as it is supposed to every 1hz, leaving /proc/stat stalled:
Task in a full dynticks CPU
----------------------------
* hrtimer A is queued 2 seconds ahead
* the tick is stopped, scheduled 1 second ahead
* tick fires 1 second later
* on tick exit, nohz schedules the tick 1 second ahead but sees
the clockevent device is already programmed to that deadline,
fooled by hrtimer A, the tick isn't rescheduled.
* hrtimer A is cancelled before its deadline
* tick never fires again until an interrupt happens...
In order to fix this, store the next tick deadline to the tick_sched
local structure and reuse that value later to check whether we need to
reprogram the clock after an interrupt.
On the other hand, ts->sleep_length still wants to know about the next
clock event and not just the tick, so we want to improve the related
comment to avoid confusion.
Reported-and-tested-by: Tim Wright <tim@binbash.co.uk>
Reported-and-tested-by: Pavel Machek <pavel@ucw.cz>
Reported-by: James Hartsock <hartsjc@redhat.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/1492783255-5051-2-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-04-21 22:00:54 +08:00
|
|
|
|
|
|
|
WARN_ON_ONCE(1);
|
|
|
|
printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
|
|
|
|
basemono, ts->next_tick, dev->next_event,
|
|
|
|
hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
|
2017-05-11 22:36:19 +08:00
|
|
|
}
|
2011-08-01 07:25:38 +08:00
|
|
|
|
2015-04-15 05:08:56 +08:00
|
|
|
/*
|
|
|
|
* nohz_stop_sched_tick can be called several times before
|
|
|
|
* the nohz_restart_sched_tick is called. This happens when
|
|
|
|
* interrupts arrive which do not cause a reschedule. In the
|
|
|
|
* first call we save the current tick time, so we can restart
|
|
|
|
* the scheduler tick in nohz_restart_sched_tick.
|
|
|
|
*/
|
|
|
|
if (!ts->tick_stopped) {
|
2017-06-19 10:12:00 +08:00
|
|
|
calc_load_nohz_start();
|
2016-04-13 21:56:51 +08:00
|
|
|
cpu_load_update_nohz_start();
|
2017-08-29 21:07:54 +08:00
|
|
|
quiet_vmstat();
|
2007-05-08 15:30:03 +08:00
|
|
|
|
2015-04-15 05:08:56 +08:00
|
|
|
ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
|
|
|
|
ts->tick_stopped = 1;
|
2015-12-11 10:27:25 +08:00
|
|
|
trace_tick_stop(1, TICK_DEP_MASK_NONE);
|
2015-04-15 05:08:56 +08:00
|
|
|
}
|
2007-05-30 05:47:39 +08:00
|
|
|
|
nohz: Fix collision between tick and other hrtimers, again
This restores commit:
24b91e360ef5: ("nohz: Fix collision between tick and other hrtimers")
... which got reverted by commit:
558e8e27e73f: ('Revert "nohz: Fix collision between tick and other hrtimers"')
... due to a regression where CPUs spuriously stopped ticking.
The bug happened when a tick fired too early past its expected expiration:
on IRQ exit the tick was scheduled again to the same deadline but skipped
reprogramming because ts->next_tick still kept in cache the deadline.
This has been fixed now with resetting ts->next_tick from the tick
itself. Extra care has also been taken to prevent from obsolete values
throughout CPU hotplug operations.
When the tick is stopped and an interrupt occurs afterward, we check on
that interrupt exit if the next tick needs to be rescheduled. If it
doesn't need any update, we don't want to do anything.
In order to check if the tick needs an update, we compare it against the
clockevent device deadline. Now that's a problem because the clockevent
device is at a lower level than the tick itself if it is implemented
on top of hrtimer.
Every hrtimer share this clockevent device. So comparing the next tick
deadline against the clockevent device deadline is wrong because the
device may be programmed for another hrtimer whose deadline collides
with the tick. As a result we may end up not reprogramming the tick
accidentally.
In a worst case scenario under full dynticks mode, the tick stops firing
as it is supposed to every 1hz, leaving /proc/stat stalled:
Task in a full dynticks CPU
----------------------------
* hrtimer A is queued 2 seconds ahead
* the tick is stopped, scheduled 1 second ahead
* tick fires 1 second later
* on tick exit, nohz schedules the tick 1 second ahead but sees
the clockevent device is already programmed to that deadline,
fooled by hrtimer A, the tick isn't rescheduled.
* hrtimer A is cancelled before its deadline
* tick never fires again until an interrupt happens...
In order to fix this, store the next tick deadline to the tick_sched
local structure and reuse that value later to check whether we need to
reprogram the clock after an interrupt.
On the other hand, ts->sleep_length still wants to know about the next
clock event and not just the tick, so we want to improve the related
comment to avoid confusion.
Reported-and-tested-by: Tim Wright <tim@binbash.co.uk>
Reported-and-tested-by: Pavel Machek <pavel@ucw.cz>
Reported-by: James Hartsock <hartsjc@redhat.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/1492783255-5051-2-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-04-21 22:00:54 +08:00
|
|
|
ts->next_tick = tick;
|
|
|
|
|
2015-04-15 05:08:56 +08:00
|
|
|
/*
|
2015-04-15 05:08:58 +08:00
|
|
|
* If the expiration time == KTIME_MAX, then we simply stop
|
|
|
|
* the tick timer.
|
2015-04-15 05:08:56 +08:00
|
|
|
*/
|
2015-04-15 05:08:58 +08:00
|
|
|
if (unlikely(expires == KTIME_MAX)) {
|
2015-04-15 05:08:56 +08:00
|
|
|
if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
|
|
|
|
hrtimer_cancel(&ts->sched_timer);
|
|
|
|
goto out;
|
2007-02-16 17:28:03 +08:00
|
|
|
}
|
2015-04-15 05:08:54 +08:00
|
|
|
|
2017-06-13 12:04:14 +08:00
|
|
|
hrtimer_set_expires(&ts->sched_timer, tick);
|
|
|
|
|
2015-04-15 05:08:56 +08:00
|
|
|
if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
|
2017-06-13 12:04:14 +08:00
|
|
|
hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
|
2015-04-15 05:08:56 +08:00
|
|
|
else
|
2015-04-15 05:08:58 +08:00
|
|
|
tick_program_event(tick, 1);
|
2007-02-16 17:28:03 +08:00
|
|
|
out:
|
nohz: Fix collision between tick and other hrtimers, again
This restores commit:
24b91e360ef5: ("nohz: Fix collision between tick and other hrtimers")
... which got reverted by commit:
558e8e27e73f: ('Revert "nohz: Fix collision between tick and other hrtimers"')
... due to a regression where CPUs spuriously stopped ticking.
The bug happened when a tick fired too early past its expected expiration:
on IRQ exit the tick was scheduled again to the same deadline but skipped
reprogramming because ts->next_tick still kept in cache the deadline.
This has been fixed now with resetting ts->next_tick from the tick
itself. Extra care has also been taken to prevent from obsolete values
throughout CPU hotplug operations.
When the tick is stopped and an interrupt occurs afterward, we check on
that interrupt exit if the next tick needs to be rescheduled. If it
doesn't need any update, we don't want to do anything.
In order to check if the tick needs an update, we compare it against the
clockevent device deadline. Now that's a problem because the clockevent
device is at a lower level than the tick itself if it is implemented
on top of hrtimer.
Every hrtimer share this clockevent device. So comparing the next tick
deadline against the clockevent device deadline is wrong because the
device may be programmed for another hrtimer whose deadline collides
with the tick. As a result we may end up not reprogramming the tick
accidentally.
In a worst case scenario under full dynticks mode, the tick stops firing
as it is supposed to every 1hz, leaving /proc/stat stalled:
Task in a full dynticks CPU
----------------------------
* hrtimer A is queued 2 seconds ahead
* the tick is stopped, scheduled 1 second ahead
* tick fires 1 second later
* on tick exit, nohz schedules the tick 1 second ahead but sees
the clockevent device is already programmed to that deadline,
fooled by hrtimer A, the tick isn't rescheduled.
* hrtimer A is cancelled before its deadline
* tick never fires again until an interrupt happens...
In order to fix this, store the next tick deadline to the tick_sched
local structure and reuse that value later to check whether we need to
reprogram the clock after an interrupt.
On the other hand, ts->sleep_length still wants to know about the next
clock event and not just the tick, so we want to improve the related
comment to avoid confusion.
Reported-and-tested-by: Tim Wright <tim@binbash.co.uk>
Reported-and-tested-by: Pavel Machek <pavel@ucw.cz>
Reported-by: James Hartsock <hartsjc@redhat.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/1492783255-5051-2-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-04-21 22:00:54 +08:00
|
|
|
/*
|
|
|
|
* Update the estimated sleep length until the next timer
|
|
|
|
* (not only the tick).
|
|
|
|
*/
|
cpuidle: consolidate 2.6.22 cpuidle branch into one patch
commit e5a16b1f9eec0af7cfa0830304b41c1c0833cf9f
Author: Len Brown <len.brown@intel.com>
Date: Tue Oct 2 23:44:44 2007 -0400
cpuidle: shrink diff
processor_idle.c | 440 +++++++++++++++++++++++++++++++++++++++++--
1 file changed, 429 insertions(+), 11 deletions(-)
Signed-off-by: Len Brown <len.brown@intel.com>
commit dfbb9d5aedfb18848a3e0d6f6e3e4969febb209c
Author: Len Brown <len.brown@intel.com>
Date: Wed Sep 26 02:17:55 2007 -0400
cpuidle: reduce diff size
Reduces the cpuidle processor_idle.c diff vs 2.6.22 from this
processor_idle.c | 2006 ++++++++++++++++++++++++++-----------------
1 file changed, 1219 insertions(+), 787 deletions(-)
to this:
processor_idle.c | 502 +++++++++++++++++++++++++++++++++++++++----
1 file changed, 458 insertions(+), 44 deletions(-)
...for the purpose of making the cpuilde patch less invasive
and easier to review.
no functional changes. build tested only.
Signed-off-by: Len Brown <len.brown@intel.com>
commit 889172fc915f5a7fe20f35b133cbd205ce69bf6c
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Thu Sep 13 13:40:05 2007 -0700
cpuidle: Retain old ACPI policy for !CONFIG_CPU_IDLE
Retain the old policy in processor_idle, so that when CPU_IDLE is not
configured, old C-state policy will still be used. This provides a
clean gradual migration path from old ACPI policy to new cpuidle
based policy.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 9544a8181edc7ecc33b3bfd69271571f98ed08bc
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Thu Sep 13 13:39:17 2007 -0700
cpuidle: Configure governors by default
Quoting Len "Do not give an option to users to shoot themselves in the foot".
Remove the configurability of ladder and menu governors as they are
needed for default policy of cpuidle. That way users will not be able to
have cpuidle without any policy loosing all C-state power savings.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 8975059a2c1e56cfe83d1bcf031bcf4cb39be743
Author: Adam Belay <abelay@novell.com>
Date: Tue Aug 21 18:27:07 2007 -0400
CPUIDLE: load ACPI properly when CPUIDLE is disabled
Change the registration return codes for when CPUIDLE
support is not compiled into the kernel. As a result, the ACPI
processor driver will load properly even if CPUIDLE is unavailable.
However, it may be possible to cleanup the ACPI processor driver further
and eliminate some dead code paths.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit e0322e2b58dd1b12ec669bf84693efe0dc2414a8
Author: Adam Belay <abelay@novell.com>
Date: Tue Aug 21 18:26:06 2007 -0400
CPUIDLE: remove cpuidle_get_bm_activity()
Remove cpuidle_get_bm_activity() and updates governors
accordingly.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 18a6e770d5c82ba26653e53d240caa617e09e9ab
Author: Adam Belay <abelay@novell.com>
Date: Tue Aug 21 18:25:58 2007 -0400
CPUIDLE: max_cstate fix
Currently max_cstate is limited to 0, resulting in no idle processor
power management on ACPI platforms. This patch restores the value to
the array size.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 1fdc0887286179b40ce24bcdbde663172e205ef0
Author: Adam Belay <abelay@novell.com>
Date: Tue Aug 21 18:25:40 2007 -0400
CPUIDLE: handle BM detection inside the ACPI Processor driver
Update the ACPI processor driver to detect BM activity and
limit state entry depth internally, rather than exposing such
requirements to CPUIDLE. As a result, CPUIDLE can drop this
ACPI-specific interface and become more platform independent. BM
activity is now handled much more aggressively than it was in the
original implementation, so some testing coverage may be needed to
verify that this doesn't introduce any DMA buffer under-run issues.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 0ef38840db666f48e3cdd2b769da676c57228dd9
Author: Adam Belay <abelay@novell.com>
Date: Tue Aug 21 18:25:14 2007 -0400
CPUIDLE: menu governor updates
Tweak the menu governor to more effectively handle non-timer
break events. Non-timer break events are detected by comparing the
actual sleep time to the expected sleep time. In future revisions, it
may be more reliable to use the timer data structures directly.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit bb4d74fca63fa96cf3ace644b15ae0f12b7df5a1
Author: Adam Belay <abelay@novell.com>
Date: Tue Aug 21 18:24:40 2007 -0400
CPUIDLE: fix 'current_governor' sysfs entry
Allow the "current_governor" sysfs entry to properly handle
input terminated with '\n'.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit df3c71559bb69b125f1a48971bf0d17f78bbdf47
Author: Len Brown <len.brown@intel.com>
Date: Sun Aug 12 02:00:45 2007 -0400
cpuidle: fix IA64 build (again)
Signed-off-by: Len Brown <len.brown@intel.com>
commit a02064579e3f9530fd31baae16b1fc46b5a7bca8
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Sun Aug 12 01:39:27 2007 -0400
cpuidle: Remove support for runtime changing of max_cstate
Remove support for runtime changeability of max_cstate. Drivers can use
use latency APIs.
max_cstate can still be used as a boot time option and dmi override.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 0912a44b13adf22f5e3f607d263aed23b4910d7e
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Sun Aug 12 01:39:16 2007 -0400
cpuidle: Remove ACPI cstate_limit calls from ipw2100
ipw2100 already has code to use accetable_latency interfaces to limit the
C-state. Remove the calls to acpi_set_cstate_limit and acpi_get_cstate_limit
as they are redundant.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit c649a76e76be6bff1fd770d0a775798813a3f6e0
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Sun Aug 12 01:35:39 2007 -0400
cpuidle: compile fix for pause and resume functions
Fix the compilation failure when cpuidle is not compiled in.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Acked-by: Adam Belay <adam.belay@novell.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 2305a5920fb8ee6ccec1c62ade05aa8351091d71
Author: Adam Belay <abelay@novell.com>
Date: Thu Jul 19 00:49:00 2007 -0400
cpuidle: re-write
Some portions have been rewritten to make the code cleaner and lighter
weight. The following is a list of changes:
1.) the state name is now included in the sysfs interface
2.) detection, hotplug, and available state modifications are handled by
CPUIDLE drivers directly
3.) the CPUIDLE idle handler is only ever installed when at least one
cpuidle_device is enabled and ready
4.) the menu governor BM code no longer overflows
5.) the sysfs attributes are now printed as unsigned integers, avoiding
negative values
6.) a variety of other small cleanups
Also, Idle drivers are no longer swappable during runtime through the
CPUIDLE sysfs inteface. On i386 and x86_64 most idle handlers (e.g.
poll, mwait, halt, etc.) don't benefit from an infrastructure that
supports multiple states, so I think using a more general case idle
handler selection mechanism would be cleaner.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Acked-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit df25b6b56955714e6e24b574d88d1fd11f0c3ee5
Author: Len Brown <len.brown@intel.com>
Date: Tue Jul 24 17:08:21 2007 -0400
cpuidle: fix IA64 buid
Signed-off-by: Len Brown <len.brown@intel.com>
commit fd6ada4c14488755ff7068860078c437431fbccd
Author: Adrian Bunk <bunk@stusta.de>
Date: Mon Jul 9 11:33:13 2007 -0700
cpuidle: static
make cpuidle_replace_governor() static
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit c1d4a2cebcadf2429c0c72e1d29aa2a9684c32e0
Author: Adrian Bunk <bunk@stusta.de>
Date: Tue Jul 3 00:54:40 2007 -0400
cpuidle: static
This patch makes the needlessly global struct menu_governor static.
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit dbf8780c6e8d572c2c273da97ed1cca7608fd999
Author: Andrew Morton <akpm@linux-foundation.org>
Date: Tue Jul 3 00:49:14 2007 -0400
export symbol tick_nohz_get_sleep_length
ERROR: "tick_nohz_get_sleep_length" [drivers/cpuidle/governors/menu.ko] undefined!
ERROR: "tick_nohz_get_idle_jiffies" [drivers/cpuidle/governors/menu.ko] undefined!
And please be sure to get your changes to core kernel suitably reviewed.
Cc: Adam Belay <abelay@novell.com>
Cc: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 29f0e248e7017be15f99febf9143a2cef00b2961
Author: Andrew Morton <akpm@linux-foundation.org>
Date: Tue Jul 3 00:43:04 2007 -0400
tick.h needs hrtimer.h
It uses hrtimers.
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit e40cede7d63a029e92712a3fe02faee60cc38fb4
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:40:34 2007 -0400
cpuidle: first round of documentation updates
Documentation changes based on Pavel's feedback.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 83b42be2efece386976507555c29e7773a0dfcd1
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:39:25 2007 -0400
cpuidle: add rating to the governors and pick the one with highest rating by default
Introduce a governor rating scheme to pick the right governor by default.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit d2a74b8c5e8f22def4709330d4bfc4a29209b71c
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:38:08 2007 -0400
cpuidle: make cpuidle sysfs driver governor switch off by default
Make default cpuidle sysfs to show current_governor and current_driver in
read-only mode. More elaborate available_governors and available_drivers with
writeable current_governor and current_driver interface only appear with
"cpuidle_sysfs_switch" boot parameter.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 1f60a0e80bf83cf6b55c8845bbe5596ed8f6307b
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:37:00 2007 -0400
cpuidle: menu governor: change the early break condition
Change the C-state early break out algorithm in menu governor.
We only look at early breakouts that result in wakeups shorter than idle
state's target_residency. If such a breakout is frequent enough, eliminate
the particular idle state up to a timeout period.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 45a42095cf64b003b4a69be3ce7f434f97d7af51
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:35:38 2007 -0400
cpuidle: fix uninitialized variable in sysfs routine
Fix the uninitialized usage of ret.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 80dca7cdba3e6ee13eae277660873ab9584eb3be
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:34:16 2007 -0400
cpuidle: reenable /proc/acpi//power interface for the time being
Keep /proc/acpi/processor/CPU*/power around for a while as powertop depends
on it. It will be marked deprecated and removed in future. powertop can use
cpuidle interfaces instead.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 589c37c2646c5e3813a51255a5ee1159cb4c33fc
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:32:37 2007 -0400
cpuidle: menu governor and hrtimer compile fix
Compile fix for menu governor.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 0ba80bd9ab3ed304cb4f19b722e4cc6740588b5e
Author: Len Brown <len.brown@intel.com>
Date: Thu May 31 22:51:43 2007 -0400
cpuidle: build fix - cpuidle vs ipw2100 module
ERROR: "acpi_set_cstate_limit" [drivers/net/wireless/ipw2100.ko] undefined!
Signed-off-by: Len Brown <len.brown@intel.com>
commit d7d8fa7f96a7f7682be7c6cc0cc53fa7a18c3b58
Author: Adam Belay <abelay@novell.com>
Date: Sat Mar 24 03:47:07 2007 -0400
cpuidle: add the 'menu' governor
Here is my first take at implementing an idle PM governor that takes
full advantage of NO_HZ. I call it the 'menu' governor because it
considers the full list of idle states before each entry.
I've kept the implementation fairly simple. It attempts to guess the
next residency time and then chooses a state that would meet at least
the break-even point between power savings and entry cost. To this end,
it selects the deepest idle state that satisfies the following
constraints:
1. If the idle time elapsed since bus master activity was detected
is below a threshold (currently 20 ms), then limit the selection
to C2-type or above.
2. Do not choose a state with a break-even residency that exceeds
the expected time remaining until the next timer interrupt.
3. Do not choose a state with a break-even residency that exceeds
the elapsed time between the last pair of break events,
excluding timer interrupts.
This governor has an advantage over "ladder" governor because it
proactively checks how much time remains until the next timer interrupt
using the tick infrastructure. Also, it handles device interrupt
activity more intelligently by not including timer interrupts in break
event calculations. Finally, it doesn't make policy decisions using the
number of state entries, which can have variable residency times (NO_HZ
makes these potentially very large), and instead only considers sleep
time deltas.
The menu governor can be selected during runtime using the cpuidle sysfs
interface like so:
"echo "menu" > /sys/devices/system/cpu/cpuidle/current_governor"
Signed-off-by: Adam Belay <abelay@novell.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit a4bec7e65aa3b7488b879d971651cc99a6c410fe
Author: Adam Belay <abelay@novell.com>
Date: Sat Mar 24 03:47:03 2007 -0400
cpuidle: export time until next timer interrupt using NO_HZ
Expose information about the time remaining until the next
timer interrupt expires by utilizing the dynticks infrastructure.
Also modify the main idle loop to allow dynticks to handle
non-interrupt break events (e.g. DMA). Finally, expose sleep ticks
information to external code. Thomas Gleixner is responsible for much
of the code in this patch. However, I've made some additional changes,
so I'm probably responsible if there are any bugs or oversights :)
Signed-off-by: Adam Belay <abelay@novell.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 2929d8996fbc77f41a5ff86bb67cdde3ca7d2d72
Author: Adam Belay <abelay@novell.com>
Date: Sat Mar 24 03:46:58 2007 -0400
cpuidle: governor API changes
This patch prepares cpuidle for the menu governor. It adds an optional
stage after idle state entry to give the governor an opportunity to
check why the state was exited. Also it makes sure the idle loop
returns after each state entry, allowing the appropriate dynticks code
to run.
Signed-off-by: Adam Belay <abelay@novell.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 3a7fd42f9825c3b03e364ca59baa751bb350775f
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Thu Apr 26 00:03:59 2007 -0700
cpuidle: hang fix
Prevent hang on x86-64, when ACPI processor driver is added as a module on
a system that does not support C-states.
x86-64 expects all idle handlers to enable interrupts before returning from
idle handler. This is due to enter_idle(), exit_idle() races. Make
cpuidle_idle_call() conform to this when there is no pm_idle_old.
Also, cpuidle looks at the return values of attach_driver() and sets
current_driver to NULL if attach fails on all CPUs.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 4893339a142afbd5b7c01ffadfd53d14746e858e
Author: Shaohua Li <shaohua.li@intel.com>
Date: Thu Apr 26 10:40:09 2007 +0800
cpuidle: add support for max_cstate limit
With CPUIDLE framework, the max_cstate (to limit max cpu c-state)
parameter is ignored. Some systems require it to ignore C2/C3
and some drivers like ipw require it too.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 43bbbbe1cb998cbd2df656f55bb3bfe30f30e7d1
Author: Shaohua Li <shaohua.li@intel.com>
Date: Thu Apr 26 10:40:13 2007 +0800
cpuidle: add cpuidle_force_redetect_devices API
add cpuidle_force_redetect_devices API,
which forces all CPU redetect idle states.
Next patch will use it.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit d1edadd608f24836def5ec483d2edccfb37b1d19
Author: Shaohua Li <shaohua.li@intel.com>
Date: Thu Apr 26 10:40:01 2007 +0800
cpuidle: fix sysfs related issue
Fix the cpuidle sysfs issue.
a. make kobject dynamically allocated
b. fixed sysfs init issue to avoid suspend/resume issue
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 7169a5cc0d67b263978859672e86c13c23a5570d
Author: Randy Dunlap <randy.dunlap@oracle.com>
Date: Wed Mar 28 22:52:53 2007 -0400
cpuidle: 1-bit field must be unsigned
A 1-bit bitfield has no room for a sign bit.
drivers/cpuidle/governors/ladder.c:54:16: error: dubious bitfield without explicit `signed' or `unsigned'
Signed-off-by: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 4658620158dc2fbd9e4bcb213c5b6fb5d05ba7d4
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Wed Mar 28 22:52:41 2007 -0400
cpuidle: fix boot hang
Patch for cpuidle boot hang reported by Larry Finger here.
http://www.ussg.iu.edu/hypermail/linux/kernel/0703.2/2025.html
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Larry Finger <larry.finger@lwfinger.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit c17e168aa6e5fe3851baaae8df2fbc1cf11443a9
Author: Len Brown <len.brown@intel.com>
Date: Wed Mar 7 04:37:53 2007 -0500
cpuidle: ladder does not depend on ACPI
build fix for CONFIG_ACPI=n
In file included from drivers/cpuidle/governors/ladder.c:21:
include/acpi/processor.h:88: error: expected specifier-qualifier-list before 'acpi_integer'
include/acpi/processor.h:106: error: expected specifier-qualifier-list before 'acpi_integer'
include/acpi/processor.h:168: error: expected specifier-qualifier-list before 'acpi_handle'
Signed-off-by: Len Brown <len.brown@intel.com>
commit 8c91d958246bde68db0c3f0c57b535962ce861cb
Author: Adrian Bunk <bunk@stusta.de>
Date: Tue Mar 6 02:29:40 2007 -0800
cpuidle: make code static
This patch makes the following needlessly global code static:
- driver.c: __cpuidle_find_driver()
- governor.c: __cpuidle_find_governor()
- ladder.c: struct ladder_governor
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Adam Belay <abelay@novell.com>
Cc: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 0c39dc3187094c72c33ab65a64d2017b21f372d2
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Wed Mar 7 02:38:22 2007 -0500
cpu_idle: fix build break
This patch fixes a build breakage with !CONFIG_HOTPLUG_CPU and
CONFIG_CPU_IDLE.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 8112e3b115659b07df340ef170515799c0105f82
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Mar 6 02:29:39 2007 -0800
cpuidle: build fix for !CPU_IDLE
Fix the compile issues when CPU_IDLE is not configured.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Adam Belay <abelay@novell.com>
Cc: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 1eb4431e9599cd25e0d9872f3c2c8986821839dd
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Thu Feb 22 13:54:57 2007 -0800
cpuidle take2: Basic documentation for cpuidle
Documentation for cpuidle infrastructure
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Adam Belay <abelay@novell.com>
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit ef5f15a8b79123a047285ec2e3899108661df779
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Thu Feb 22 13:54:03 2007 -0800
cpuidle take2: Hookup ACPI C-states driver with cpuidle
Hookup ACPI C-states onto generic cpuidle infrastructure.
drivers/acpi/processor_idle.c is now an ACPI C-states driver that registers as
a driver in cpuidle infrastructure and the policy part is removed from
drivers/acpi/processor_idle.c. We use governor in cpuidle instead.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Adam Belay <abelay@novell.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 987196fa82d4db52c407e8c9d5dec884ba602183
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Thu Feb 22 13:52:57 2007 -0800
cpuidle take2: Core cpuidle infrastructure
Announcing 'cpuidle', a new CPU power management infrastructure to manage
idle CPUs in a clean and efficient manner.
cpuidle separates out the drivers that can provide support for multiple types
of idle states and policy governors that decide on what idle state to use
at run time.
A cpuidle driver can support multiple idle states based on parameters like
varying power consumption, wakeup latency, etc (ACPI C-states for example).
A cpuidle governor can be usage model specific (laptop, server,
laptop on battery etc).
Main advantage of the infrastructure being, it allows independent development
of drivers and governors and allows for better CPU power management.
A huge thanks to Adam Belay and Shaohua Li who were part of this mini-project
since its beginning and are greatly responsible for this patchset.
This patch:
Core cpuidle infrastructure.
Introduces a new abstraction layer for cpuidle:
* which manages drivers that can support multiple idles states. Drivers
can be generic or particular to specific hardware/platform
* allows plugging in multiple policy governors that can take idle state policy
decision
* The core also has a set of sysfs interfaces with which an administrator can know
about supported drivers and governors and switch them at run time.
Signed-off-by: Adam Belay <abelay@novell.com>
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
2007-10-04 06:58:00 +08:00
|
|
|
ts->sleep_length = ktime_sub(dev->next_event, now);
|
2015-04-15 05:08:58 +08:00
|
|
|
return tick;
|
2011-10-08 00:22:06 +08:00
|
|
|
}
|
|
|
|
|
2016-04-13 21:56:51 +08:00
|
|
|
static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
|
2015-05-29 20:42:15 +08:00
|
|
|
{
|
|
|
|
/* Update jiffies first */
|
|
|
|
tick_do_update_jiffies64(now);
|
2016-04-13 21:56:51 +08:00
|
|
|
cpu_load_update_nohz_stop();
|
2015-05-29 20:42:15 +08:00
|
|
|
|
2016-07-04 17:50:36 +08:00
|
|
|
/*
|
|
|
|
* Clear the timer idle flag, so we avoid IPIs on remote queueing and
|
|
|
|
* the clock forward checks in the enqueue path:
|
|
|
|
*/
|
|
|
|
timer_clear_idle();
|
|
|
|
|
2017-06-19 10:12:00 +08:00
|
|
|
calc_load_nohz_stop();
|
2015-12-09 00:28:04 +08:00
|
|
|
touch_softlockup_watchdog_sched();
|
2015-05-29 20:42:15 +08:00
|
|
|
/*
|
|
|
|
* Cancel the scheduled timer and restore the tick
|
|
|
|
*/
|
|
|
|
ts->tick_stopped = 0;
|
|
|
|
ts->idle_exittime = now;
|
|
|
|
|
|
|
|
tick_nohz_restart(ts, now);
|
|
|
|
}
|
2015-05-28 01:22:08 +08:00
|
|
|
|
|
|
|
static void tick_nohz_full_update_tick(struct tick_sched *ts)
|
2013-04-20 22:40:31 +08:00
|
|
|
{
|
|
|
|
#ifdef CONFIG_NO_HZ_FULL
|
2013-11-28 14:27:11 +08:00
|
|
|
int cpu = smp_processor_id();
|
2013-04-20 22:40:31 +08:00
|
|
|
|
2015-05-27 21:42:42 +08:00
|
|
|
if (!tick_nohz_full_cpu(cpu))
|
2013-11-28 14:27:11 +08:00
|
|
|
return;
|
2013-04-20 22:40:31 +08:00
|
|
|
|
2013-11-28 14:27:11 +08:00
|
|
|
if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
|
|
|
|
return;
|
2013-04-20 22:40:31 +08:00
|
|
|
|
2016-09-07 18:51:13 +08:00
|
|
|
if (can_stop_full_tick(cpu, ts))
|
2015-05-28 01:22:08 +08:00
|
|
|
tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
|
|
|
|
else if (ts->tick_stopped)
|
2016-04-13 21:56:51 +08:00
|
|
|
tick_nohz_restart_sched_tick(ts, ktime_get());
|
2013-04-20 22:40:31 +08:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2011-08-01 06:06:10 +08:00
|
|
|
static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
|
|
|
|
{
|
|
|
|
/*
|
2016-07-01 18:42:35 +08:00
|
|
|
* If this CPU is offline and it is the one which updates
|
2011-08-01 06:06:10 +08:00
|
|
|
* jiffies, then give up the assignment and let it be taken by
|
2016-07-01 18:42:35 +08:00
|
|
|
* the CPU which runs the tick timer next. If we don't drop
|
2011-08-01 06:06:10 +08:00
|
|
|
* this here the jiffies might be stale and do_timer() never
|
|
|
|
* invoked.
|
|
|
|
*/
|
|
|
|
if (unlikely(!cpu_online(cpu))) {
|
|
|
|
if (cpu == tick_do_timer_cpu)
|
|
|
|
tick_do_timer_cpu = TICK_DO_TIMER_NONE;
|
nohz: Fix collision between tick and other hrtimers, again
This restores commit:
24b91e360ef5: ("nohz: Fix collision between tick and other hrtimers")
... which got reverted by commit:
558e8e27e73f: ('Revert "nohz: Fix collision between tick and other hrtimers"')
... due to a regression where CPUs spuriously stopped ticking.
The bug happened when a tick fired too early past its expected expiration:
on IRQ exit the tick was scheduled again to the same deadline but skipped
reprogramming because ts->next_tick still kept in cache the deadline.
This has been fixed now with resetting ts->next_tick from the tick
itself. Extra care has also been taken to prevent from obsolete values
throughout CPU hotplug operations.
When the tick is stopped and an interrupt occurs afterward, we check on
that interrupt exit if the next tick needs to be rescheduled. If it
doesn't need any update, we don't want to do anything.
In order to check if the tick needs an update, we compare it against the
clockevent device deadline. Now that's a problem because the clockevent
device is at a lower level than the tick itself if it is implemented
on top of hrtimer.
Every hrtimer share this clockevent device. So comparing the next tick
deadline against the clockevent device deadline is wrong because the
device may be programmed for another hrtimer whose deadline collides
with the tick. As a result we may end up not reprogramming the tick
accidentally.
In a worst case scenario under full dynticks mode, the tick stops firing
as it is supposed to every 1hz, leaving /proc/stat stalled:
Task in a full dynticks CPU
----------------------------
* hrtimer A is queued 2 seconds ahead
* the tick is stopped, scheduled 1 second ahead
* tick fires 1 second later
* on tick exit, nohz schedules the tick 1 second ahead but sees
the clockevent device is already programmed to that deadline,
fooled by hrtimer A, the tick isn't rescheduled.
* hrtimer A is cancelled before its deadline
* tick never fires again until an interrupt happens...
In order to fix this, store the next tick deadline to the tick_sched
local structure and reuse that value later to check whether we need to
reprogram the clock after an interrupt.
On the other hand, ts->sleep_length still wants to know about the next
clock event and not just the tick, so we want to improve the related
comment to avoid confusion.
Reported-and-tested-by: Tim Wright <tim@binbash.co.uk>
Reported-and-tested-by: Pavel Machek <pavel@ucw.cz>
Reported-by: James Hartsock <hartsjc@redhat.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/1492783255-5051-2-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-04-21 22:00:54 +08:00
|
|
|
/*
|
|
|
|
* Make sure the CPU doesn't get fooled by obsolete tick
|
|
|
|
* deadline if it comes back online later.
|
|
|
|
*/
|
|
|
|
ts->next_tick = 0;
|
2013-05-14 03:40:27 +08:00
|
|
|
return false;
|
2011-08-01 06:06:10 +08:00
|
|
|
}
|
|
|
|
|
2013-11-29 19:18:13 +08:00
|
|
|
if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
|
2016-12-25 18:38:40 +08:00
|
|
|
ts->sleep_length = NSEC_PER_SEC / HZ;
|
2011-08-01 06:06:10 +08:00
|
|
|
return false;
|
2013-11-29 19:18:13 +08:00
|
|
|
}
|
2011-08-01 06:06:10 +08:00
|
|
|
|
|
|
|
if (need_resched())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
|
|
|
|
static int ratelimit;
|
|
|
|
|
2012-08-23 23:34:07 +08:00
|
|
|
if (ratelimit < 10 &&
|
|
|
|
(local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
|
2013-02-09 01:37:30 +08:00
|
|
|
pr_warn("NOHZ: local_softirq_pending %02x\n",
|
|
|
|
(unsigned int) local_softirq_pending());
|
2011-08-01 06:06:10 +08:00
|
|
|
ratelimit++;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2013-07-25 05:52:27 +08:00
|
|
|
if (tick_nohz_full_enabled()) {
|
2012-12-19 01:24:35 +08:00
|
|
|
/*
|
|
|
|
* Keep the tick alive to guarantee timekeeping progression
|
|
|
|
* if there are full dynticks CPUs around
|
|
|
|
*/
|
|
|
|
if (tick_do_timer_cpu == cpu)
|
|
|
|
return false;
|
|
|
|
/*
|
|
|
|
* Boot safety: make sure the timekeeping duty has been
|
|
|
|
* assigned before entering dyntick-idle mode,
|
|
|
|
*/
|
|
|
|
if (tick_do_timer_cpu == TICK_DO_TIMER_NONE)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2011-08-01 06:06:10 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2011-07-27 23:29:28 +08:00
|
|
|
static void __tick_nohz_idle_enter(struct tick_sched *ts)
|
|
|
|
{
|
2011-08-01 07:25:38 +08:00
|
|
|
ktime_t now, expires;
|
2011-08-01 06:06:10 +08:00
|
|
|
int cpu = smp_processor_id();
|
2011-07-27 23:29:28 +08:00
|
|
|
|
2016-09-02 14:38:23 +08:00
|
|
|
now = tick_nohz_start_idle(ts);
|
|
|
|
|
2011-08-01 06:06:10 +08:00
|
|
|
if (can_stop_idle_tick(cpu, ts)) {
|
|
|
|
int was_stopped = ts->tick_stopped;
|
|
|
|
|
|
|
|
ts->idle_calls++;
|
2011-08-01 07:25:38 +08:00
|
|
|
|
|
|
|
expires = tick_nohz_stop_sched_tick(ts, now, cpu);
|
2016-12-25 18:38:40 +08:00
|
|
|
if (expires > 0LL) {
|
2011-08-01 07:25:38 +08:00
|
|
|
ts->idle_sleeps++;
|
|
|
|
ts->idle_expires = expires;
|
|
|
|
}
|
2011-08-01 06:06:10 +08:00
|
|
|
|
2017-06-19 10:12:01 +08:00
|
|
|
if (!was_stopped && ts->tick_stopped) {
|
2011-08-01 06:06:10 +08:00
|
|
|
ts->idle_jiffies = ts->last_jiffies;
|
2017-06-19 10:12:01 +08:00
|
|
|
nohz_balance_enter_idle(cpu);
|
|
|
|
}
|
2011-08-01 06:06:10 +08:00
|
|
|
}
|
2011-10-08 00:22:06 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* tick_nohz_idle_enter - stop the idle tick from the idle task
|
|
|
|
*
|
|
|
|
* When the next event is more than a tick into the future, stop the idle tick
|
|
|
|
* Called when we start the idle loop.
|
2011-10-08 22:01:00 +08:00
|
|
|
*
|
2011-11-18 01:48:14 +08:00
|
|
|
* The arch is responsible of calling:
|
2011-10-08 22:01:00 +08:00
|
|
|
*
|
|
|
|
* - rcu_idle_enter() after its last use of RCU before the CPU is put
|
|
|
|
* to sleep.
|
|
|
|
* - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
|
2011-10-08 00:22:06 +08:00
|
|
|
*/
|
2011-11-18 01:48:14 +08:00
|
|
|
void tick_nohz_idle_enter(void)
|
2011-10-08 00:22:06 +08:00
|
|
|
{
|
|
|
|
struct tick_sched *ts;
|
|
|
|
|
2017-11-06 23:01:20 +08:00
|
|
|
lockdep_assert_irqs_enabled();
|
2012-01-07 00:33:28 +08:00
|
|
|
|
2011-11-18 01:48:14 +08:00
|
|
|
local_irq_disable();
|
|
|
|
|
2014-08-18 01:30:25 +08:00
|
|
|
ts = this_cpu_ptr(&tick_cpu_sched);
|
2011-10-08 00:22:06 +08:00
|
|
|
ts->inidle = 1;
|
2011-07-27 23:29:28 +08:00
|
|
|
__tick_nohz_idle_enter(ts);
|
2011-11-18 01:48:14 +08:00
|
|
|
|
|
|
|
local_irq_enable();
|
2011-10-08 00:22:06 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * tick_nohz_irq_exit - update next tick event from interrupt exit
 *
 * When an interrupt fires while we are idle and it doesn't cause
 * a reschedule, it may still add, modify or delete a timer, enqueue
 * an RCU callback, etc...
 * So we need to re-calculate and reprogram the next tick event.
 */
void tick_nohz_irq_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	/*
	 * If the CPU is in the idle loop (inidle was set by
	 * tick_nohz_idle_enter()), re-run the idle-tick-stop logic since
	 * the interrupt may have changed the next timer expiry. Otherwise
	 * this is a nohz_full CPU returning from an interrupt; update the
	 * tick for the full-dynticks path instead.
	 */
	if (ts->inidle)
		__tick_nohz_idle_enter(ts);
	else
		tick_nohz_full_update_tick(ts);
}
|
|
|
|
|
cpuidle: consolidate 2.6.22 cpuidle branch into one patch
commit e5a16b1f9eec0af7cfa0830304b41c1c0833cf9f
Author: Len Brown <len.brown@intel.com>
Date: Tue Oct 2 23:44:44 2007 -0400
cpuidle: shrink diff
processor_idle.c | 440 +++++++++++++++++++++++++++++++++++++++++--
1 file changed, 429 insertions(+), 11 deletions(-)
Signed-off-by: Len Brown <len.brown@intel.com>
commit dfbb9d5aedfb18848a3e0d6f6e3e4969febb209c
Author: Len Brown <len.brown@intel.com>
Date: Wed Sep 26 02:17:55 2007 -0400
cpuidle: reduce diff size
Reduces the cpuidle processor_idle.c diff vs 2.6.22 from this
processor_idle.c | 2006 ++++++++++++++++++++++++++-----------------
1 file changed, 1219 insertions(+), 787 deletions(-)
to this:
processor_idle.c | 502 +++++++++++++++++++++++++++++++++++++++----
1 file changed, 458 insertions(+), 44 deletions(-)
...for the purpose of making the cpuidle patch less invasive
and easier to review.
no functional changes. build tested only.
Signed-off-by: Len Brown <len.brown@intel.com>
commit 889172fc915f5a7fe20f35b133cbd205ce69bf6c
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Thu Sep 13 13:40:05 2007 -0700
cpuidle: Retain old ACPI policy for !CONFIG_CPU_IDLE
Retain the old policy in processor_idle, so that when CPU_IDLE is not
configured, old C-state policy will still be used. This provides a
clean gradual migration path from old ACPI policy to new cpuidle
based policy.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 9544a8181edc7ecc33b3bfd69271571f98ed08bc
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Thu Sep 13 13:39:17 2007 -0700
cpuidle: Configure governors by default
Quoting Len "Do not give an option to users to shoot themselves in the foot".
Remove the configurability of ladder and menu governors as they are
needed for default policy of cpuidle. That way users will not be able to
have cpuidle without any policy loosing all C-state power savings.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 8975059a2c1e56cfe83d1bcf031bcf4cb39be743
Author: Adam Belay <abelay@novell.com>
Date: Tue Aug 21 18:27:07 2007 -0400
CPUIDLE: load ACPI properly when CPUIDLE is disabled
Change the registration return codes for when CPUIDLE
support is not compiled into the kernel. As a result, the ACPI
processor driver will load properly even if CPUIDLE is unavailable.
However, it may be possible to cleanup the ACPI processor driver further
and eliminate some dead code paths.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit e0322e2b58dd1b12ec669bf84693efe0dc2414a8
Author: Adam Belay <abelay@novell.com>
Date: Tue Aug 21 18:26:06 2007 -0400
CPUIDLE: remove cpuidle_get_bm_activity()
Remove cpuidle_get_bm_activity() and updates governors
accordingly.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 18a6e770d5c82ba26653e53d240caa617e09e9ab
Author: Adam Belay <abelay@novell.com>
Date: Tue Aug 21 18:25:58 2007 -0400
CPUIDLE: max_cstate fix
Currently max_cstate is limited to 0, resulting in no idle processor
power management on ACPI platforms. This patch restores the value to
the array size.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 1fdc0887286179b40ce24bcdbde663172e205ef0
Author: Adam Belay <abelay@novell.com>
Date: Tue Aug 21 18:25:40 2007 -0400
CPUIDLE: handle BM detection inside the ACPI Processor driver
Update the ACPI processor driver to detect BM activity and
limit state entry depth internally, rather than exposing such
requirements to CPUIDLE. As a result, CPUIDLE can drop this
ACPI-specific interface and become more platform independent. BM
activity is now handled much more aggressively than it was in the
original implementation, so some testing coverage may be needed to
verify that this doesn't introduce any DMA buffer under-run issues.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 0ef38840db666f48e3cdd2b769da676c57228dd9
Author: Adam Belay <abelay@novell.com>
Date: Tue Aug 21 18:25:14 2007 -0400
CPUIDLE: menu governor updates
Tweak the menu governor to more effectively handle non-timer
break events. Non-timer break events are detected by comparing the
actual sleep time to the expected sleep time. In future revisions, it
may be more reliable to use the timer data structures directly.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit bb4d74fca63fa96cf3ace644b15ae0f12b7df5a1
Author: Adam Belay <abelay@novell.com>
Date: Tue Aug 21 18:24:40 2007 -0400
CPUIDLE: fix 'current_governor' sysfs entry
Allow the "current_governor" sysfs entry to properly handle
input terminated with '\n'.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit df3c71559bb69b125f1a48971bf0d17f78bbdf47
Author: Len Brown <len.brown@intel.com>
Date: Sun Aug 12 02:00:45 2007 -0400
cpuidle: fix IA64 build (again)
Signed-off-by: Len Brown <len.brown@intel.com>
commit a02064579e3f9530fd31baae16b1fc46b5a7bca8
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Sun Aug 12 01:39:27 2007 -0400
cpuidle: Remove support for runtime changing of max_cstate
Remove support for runtime changeability of max_cstate. Drivers can use
use latency APIs.
max_cstate can still be used as a boot time option and dmi override.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 0912a44b13adf22f5e3f607d263aed23b4910d7e
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Sun Aug 12 01:39:16 2007 -0400
cpuidle: Remove ACPI cstate_limit calls from ipw2100
ipw2100 already has code to use accetable_latency interfaces to limit the
C-state. Remove the calls to acpi_set_cstate_limit and acpi_get_cstate_limit
as they are redundant.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit c649a76e76be6bff1fd770d0a775798813a3f6e0
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Sun Aug 12 01:35:39 2007 -0400
cpuidle: compile fix for pause and resume functions
Fix the compilation failure when cpuidle is not compiled in.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Acked-by: Adam Belay <adam.belay@novell.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 2305a5920fb8ee6ccec1c62ade05aa8351091d71
Author: Adam Belay <abelay@novell.com>
Date: Thu Jul 19 00:49:00 2007 -0400
cpuidle: re-write
Some portions have been rewritten to make the code cleaner and lighter
weight. The following is a list of changes:
1.) the state name is now included in the sysfs interface
2.) detection, hotplug, and available state modifications are handled by
CPUIDLE drivers directly
3.) the CPUIDLE idle handler is only ever installed when at least one
cpuidle_device is enabled and ready
4.) the menu governor BM code no longer overflows
5.) the sysfs attributes are now printed as unsigned integers, avoiding
negative values
6.) a variety of other small cleanups
Also, Idle drivers are no longer swappable during runtime through the
CPUIDLE sysfs inteface. On i386 and x86_64 most idle handlers (e.g.
poll, mwait, halt, etc.) don't benefit from an infrastructure that
supports multiple states, so I think using a more general case idle
handler selection mechanism would be cleaner.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Acked-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit df25b6b56955714e6e24b574d88d1fd11f0c3ee5
Author: Len Brown <len.brown@intel.com>
Date: Tue Jul 24 17:08:21 2007 -0400
cpuidle: fix IA64 build
Signed-off-by: Len Brown <len.brown@intel.com>
commit fd6ada4c14488755ff7068860078c437431fbccd
Author: Adrian Bunk <bunk@stusta.de>
Date: Mon Jul 9 11:33:13 2007 -0700
cpuidle: static
make cpuidle_replace_governor() static
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit c1d4a2cebcadf2429c0c72e1d29aa2a9684c32e0
Author: Adrian Bunk <bunk@stusta.de>
Date: Tue Jul 3 00:54:40 2007 -0400
cpuidle: static
This patch makes the needlessly global struct menu_governor static.
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit dbf8780c6e8d572c2c273da97ed1cca7608fd999
Author: Andrew Morton <akpm@linux-foundation.org>
Date: Tue Jul 3 00:49:14 2007 -0400
export symbol tick_nohz_get_sleep_length
ERROR: "tick_nohz_get_sleep_length" [drivers/cpuidle/governors/menu.ko] undefined!
ERROR: "tick_nohz_get_idle_jiffies" [drivers/cpuidle/governors/menu.ko] undefined!
And please be sure to get your changes to core kernel suitably reviewed.
Cc: Adam Belay <abelay@novell.com>
Cc: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 29f0e248e7017be15f99febf9143a2cef00b2961
Author: Andrew Morton <akpm@linux-foundation.org>
Date: Tue Jul 3 00:43:04 2007 -0400
tick.h needs hrtimer.h
It uses hrtimers.
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit e40cede7d63a029e92712a3fe02faee60cc38fb4
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:40:34 2007 -0400
cpuidle: first round of documentation updates
Documentation changes based on Pavel's feedback.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 83b42be2efece386976507555c29e7773a0dfcd1
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:39:25 2007 -0400
cpuidle: add rating to the governors and pick the one with highest rating by default
Introduce a governor rating scheme to pick the right governor by default.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit d2a74b8c5e8f22def4709330d4bfc4a29209b71c
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:38:08 2007 -0400
cpuidle: make cpuidle sysfs driver governor switch off by default
Make default cpuidle sysfs to show current_governor and current_driver in
read-only mode. More elaborate available_governors and available_drivers with
writeable current_governor and current_driver interface only appear with
"cpuidle_sysfs_switch" boot parameter.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 1f60a0e80bf83cf6b55c8845bbe5596ed8f6307b
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:37:00 2007 -0400
cpuidle: menu governor: change the early break condition
Change the C-state early break out algorithm in menu governor.
We only look at early breakouts that result in wakeups shorter than idle
state's target_residency. If such a breakout is frequent enough, eliminate
the particular idle state upto a timeout period.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 45a42095cf64b003b4a69be3ce7f434f97d7af51
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:35:38 2007 -0400
cpuidle: fix uninitialized variable in sysfs routine
Fix the uninitialized usage of ret.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 80dca7cdba3e6ee13eae277660873ab9584eb3be
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:34:16 2007 -0400
cpuidle: reenable /proc/acpi//power interface for the time being
Keep /proc/acpi/processor/CPU*/power around for a while as powertop depends
on it. It will be marked deprecated and removed in future. powertop can use
cpuidle interfaces instead.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 589c37c2646c5e3813a51255a5ee1159cb4c33fc
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:32:37 2007 -0400
cpuidle: menu governor and hrtimer compile fix
Compile fix for menu governor.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 0ba80bd9ab3ed304cb4f19b722e4cc6740588b5e
Author: Len Brown <len.brown@intel.com>
Date: Thu May 31 22:51:43 2007 -0400
cpuidle: build fix - cpuidle vs ipw2100 module
ERROR: "acpi_set_cstate_limit" [drivers/net/wireless/ipw2100.ko] undefined!
Signed-off-by: Len Brown <len.brown@intel.com>
commit d7d8fa7f96a7f7682be7c6cc0cc53fa7a18c3b58
Author: Adam Belay <abelay@novell.com>
Date: Sat Mar 24 03:47:07 2007 -0400
cpuidle: add the 'menu' governor
Here is my first take at implementing an idle PM governor that takes
full advantage of NO_HZ. I call it the 'menu' governor because it
considers the full list of idle states before each entry.
I've kept the implementation fairly simple. It attempts to guess the
next residency time and then chooses a state that would meet at least
the break-even point between power savings and entry cost. To this end,
it selects the deepest idle state that satisfies the following
constraints:
1. If the idle time elapsed since bus master activity was detected
is below a threshold (currently 20 ms), then limit the selection
to C2-type or above.
2. Do not choose a state with a break-even residency that exceeds
the expected time remaining until the next timer interrupt.
3. Do not choose a state with a break-even residency that exceeds
the elapsed time between the last pair of break events,
excluding timer interrupts.
This governor has an advantage over "ladder" governor because it
proactively checks how much time remains until the next timer interrupt
using the tick infrastructure. Also, it handles device interrupt
activity more intelligently by not including timer interrupts in break
event calculations. Finally, it doesn't make policy decisions using the
number of state entries, which can have variable residency times (NO_HZ
makes these potentially very large), and instead only considers sleep
time deltas.
The menu governor can be selected during runtime using the cpuidle sysfs
interface like so:
"echo "menu" > /sys/devices/system/cpu/cpuidle/current_governor"
Signed-off-by: Adam Belay <abelay@novell.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit a4bec7e65aa3b7488b879d971651cc99a6c410fe
Author: Adam Belay <abelay@novell.com>
Date: Sat Mar 24 03:47:03 2007 -0400
cpuidle: export time until next timer interrupt using NO_HZ
Expose information about the time remaining until the next
timer interrupt expires by utilizing the dynticks infrastructure.
Also modify the main idle loop to allow dynticks to handle
non-interrupt break events (e.g. DMA). Finally, expose sleep ticks
information to external code. Thomas Gleixner is responsible for much
of the code in this patch. However, I've made some additional changes,
so I'm probably responsible if there are any bugs or oversights :)
Signed-off-by: Adam Belay <abelay@novell.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 2929d8996fbc77f41a5ff86bb67cdde3ca7d2d72
Author: Adam Belay <abelay@novell.com>
Date: Sat Mar 24 03:46:58 2007 -0400
cpuidle: governor API changes
This patch prepares cpuidle for the menu governor. It adds an optional
stage after idle state entry to give the governor an opportunity to
check why the state was exited. Also it makes sure the idle loop
returns after each state entry, allowing the appropriate dynticks code
to run.
Signed-off-by: Adam Belay <abelay@novell.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 3a7fd42f9825c3b03e364ca59baa751bb350775f
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Thu Apr 26 00:03:59 2007 -0700
cpuidle: hang fix
Prevent hang on x86-64, when ACPI processor driver is added as a module on
a system that does not support C-states.
x86-64 expects all idle handlers to enable interrupts before returning from
idle handler. This is due to enter_idle(), exit_idle() races. Make
cpuidle_idle_call() confirm to this when there is no pm_idle_old.
Also, cpuidle look at the return values of attch_driver() and set
current_driver to NULL if attach fails on all CPUs.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 4893339a142afbd5b7c01ffadfd53d14746e858e
Author: Shaohua Li <shaohua.li@intel.com>
Date: Thu Apr 26 10:40:09 2007 +0800
cpuidle: add support for max_cstate limit
With CPUIDLE framework, the max_cstate (to limit max cpu c-state)
parameter is ignored. Some systems require it to ignore C2/C3
and some drivers like ipw require it too.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 43bbbbe1cb998cbd2df656f55bb3bfe30f30e7d1
Author: Shaohua Li <shaohua.li@intel.com>
Date: Thu Apr 26 10:40:13 2007 +0800
cpuidle: add cpuidle_fore_redetect_devices API
add cpuidle_force_redetect_devices API,
which forces all CPU redetect idle states.
Next patch will use it.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit d1edadd608f24836def5ec483d2edccfb37b1d19
Author: Shaohua Li <shaohua.li@intel.com>
Date: Thu Apr 26 10:40:01 2007 +0800
cpuidle: fix sysfs related issue
Fix the cpuidle sysfs issue.
a. make kobject dynamicaly allocated
b. fixed sysfs init issue to avoid suspend/resume issue
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 7169a5cc0d67b263978859672e86c13c23a5570d
Author: Randy Dunlap <randy.dunlap@oracle.com>
Date: Wed Mar 28 22:52:53 2007 -0400
cpuidle: 1-bit field must be unsigned
A 1-bit bitfield has no room for a sign bit.
drivers/cpuidle/governors/ladder.c:54:16: error: dubious bitfield without explicit `signed' or `unsigned'
Signed-off-by: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 4658620158dc2fbd9e4bcb213c5b6fb5d05ba7d4
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Wed Mar 28 22:52:41 2007 -0400
cpuidle: fix boot hang
Patch for cpuidle boot hang reported by Larry Finger here.
http://www.ussg.iu.edu/hypermail/linux/kernel/0703.2/2025.html
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Larry Finger <larry.finger@lwfinger.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit c17e168aa6e5fe3851baaae8df2fbc1cf11443a9
Author: Len Brown <len.brown@intel.com>
Date: Wed Mar 7 04:37:53 2007 -0500
cpuidle: ladder does not depend on ACPI
build fix for CONFIG_ACPI=n
In file included from drivers/cpuidle/governors/ladder.c:21:
include/acpi/processor.h:88: error: expected specifier-qualifier-list before âacpi_integerâ
include/acpi/processor.h:106: error: expected specifier-qualifier-list before âacpi_integerâ
include/acpi/processor.h:168: error: expected specifier-qualifier-list before âacpi_handleâ
Signed-off-by: Len Brown <len.brown@intel.com>
commit 8c91d958246bde68db0c3f0c57b535962ce861cb
Author: Adrian Bunk <bunk@stusta.de>
Date: Tue Mar 6 02:29:40 2007 -0800
cpuidle: make code static
This patch makes the following needlessly global code static:
- driver.c: __cpuidle_find_driver()
- governor.c: __cpuidle_find_governor()
- ladder.c: struct ladder_governor
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Adam Belay <abelay@novell.com>
Cc: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 0c39dc3187094c72c33ab65a64d2017b21f372d2
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Wed Mar 7 02:38:22 2007 -0500
cpu_idle: fix build break
This patch fixes a build breakage with !CONFIG_HOTPLUG_CPU and
CONFIG_CPU_IDLE.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 8112e3b115659b07df340ef170515799c0105f82
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Mar 6 02:29:39 2007 -0800
cpuidle: build fix for !CPU_IDLE
Fix the compile issues when CPU_IDLE is not configured.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Adam Belay <abelay@novell.com>
Cc: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 1eb4431e9599cd25e0d9872f3c2c8986821839dd
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Thu Feb 22 13:54:57 2007 -0800
cpuidle take2: Basic documentation for cpuidle
Documentation for cpuidle infrastructure
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Adam Belay <abelay@novell.com>
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit ef5f15a8b79123a047285ec2e3899108661df779
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Thu Feb 22 13:54:03 2007 -0800
cpuidle take2: Hookup ACPI C-states driver with cpuidle
Hookup ACPI C-states onto generic cpuidle infrastructure.
drivers/acpi/procesor_idle.c is now a ACPI C-states driver that registers as
a driver in cpuidle infrastructure and the policy part is removed from
drivers/acpi/processor_idle.c. We use governor in cpuidle instead.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Adam Belay <abelay@novell.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 987196fa82d4db52c407e8c9d5dec884ba602183
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Thu Feb 22 13:52:57 2007 -0800
cpuidle take2: Core cpuidle infrastructure
Announcing 'cpuidle', a new CPU power management infrastructure to manage
idle CPUs in a clean and efficient manner.
cpuidle separates out the drivers that can provide support for multiple types
of idle states and policy governors that decide on what idle state to use
at run time.
A cpuidle driver can support multiple idle states based on parameters like
varying power consumption, wakeup latency, etc (ACPI C-states for example).
A cpuidle governor can be usage model specific (laptop, server,
laptop on battery etc).
Main advantage of the infrastructure being, it allows independent development
of drivers and governors and allows for better CPU power management.
A huge thanks to Adam Belay and Shaohua Li who were part of this mini-project
since its beginning and are greatly responsible for this patchset.
This patch:
Core cpuidle infrastructure.
Introduces a new abstraction layer for cpuidle:
* which manages drivers that can support multiple idles states. Drivers
can be generic or particular to specific hardware/platform
* allows pluging in multiple policy governors that can take idle state policy
decision
* The core also has a set of sysfs interfaces with which administrato can know
about supported drivers and governors and switch them at run time.
Signed-off-by: Adam Belay <abelay@novell.com>
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
2007-10-04 06:58:00 +08:00
|
|
|
/**
|
|
|
|
* tick_nohz_get_sleep_length - return the length of the current sleep
|
|
|
|
*
|
|
|
|
* Called from power state control code with interrupts disabled
|
|
|
|
*/
|
|
|
|
ktime_t tick_nohz_get_sleep_length(void)
|
|
|
|
{
|
2014-08-18 01:30:25 +08:00
|
|
|
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
|
cpuidle: consolidate 2.6.22 cpuidle branch into one patch
commit e5a16b1f9eec0af7cfa0830304b41c1c0833cf9f
Author: Len Brown <len.brown@intel.com>
Date: Tue Oct 2 23:44:44 2007 -0400
cpuidle: shrink diff
processor_idle.c | 440 +++++++++++++++++++++++++++++++++++++++++--
1 file changed, 429 insertions(+), 11 deletions(-)
Signed-off-by: Len Brown <len.brown@intel.com>
commit dfbb9d5aedfb18848a3e0d6f6e3e4969febb209c
Author: Len Brown <len.brown@intel.com>
Date: Wed Sep 26 02:17:55 2007 -0400
cpuidle: reduce diff size
Reduces the cpuidle processor_idle.c diff vs 2.6.22 from this
processor_idle.c | 2006 ++++++++++++++++++++++++++-----------------
1 file changed, 1219 insertions(+), 787 deletions(-)
to this:
processor_idle.c | 502 +++++++++++++++++++++++++++++++++++++++----
1 file changed, 458 insertions(+), 44 deletions(-)
...for the purpose of making the cpuilde patch less invasive
and easier to review.
no functional changes. build tested only.
Signed-off-by: Len Brown <len.brown@intel.com>
commit 889172fc915f5a7fe20f35b133cbd205ce69bf6c
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Thu Sep 13 13:40:05 2007 -0700
cpuidle: Retain old ACPI policy for !CONFIG_CPU_IDLE
Retain the old policy in processor_idle, so that when CPU_IDLE is not
configured, old C-state policy will still be used. This provides a
clean gradual migration path from old ACPI policy to new cpuidle
based policy.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 9544a8181edc7ecc33b3bfd69271571f98ed08bc
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Thu Sep 13 13:39:17 2007 -0700
cpuidle: Configure governors by default
Quoting Len "Do not give an option to users to shoot themselves in the foot".
Remove the configurability of ladder and menu governors as they are
needed for default policy of cpuidle. That way users will not be able to
have cpuidle without any policy loosing all C-state power savings.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 8975059a2c1e56cfe83d1bcf031bcf4cb39be743
Author: Adam Belay <abelay@novell.com>
Date: Tue Aug 21 18:27:07 2007 -0400
CPUIDLE: load ACPI properly when CPUIDLE is disabled
Change the registration return codes for when CPUIDLE
support is not compiled into the kernel. As a result, the ACPI
processor driver will load properly even if CPUIDLE is unavailable.
However, it may be possible to cleanup the ACPI processor driver further
and eliminate some dead code paths.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit e0322e2b58dd1b12ec669bf84693efe0dc2414a8
Author: Adam Belay <abelay@novell.com>
Date: Tue Aug 21 18:26:06 2007 -0400
CPUIDLE: remove cpuidle_get_bm_activity()
Remove cpuidle_get_bm_activity() and updates governors
accordingly.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 18a6e770d5c82ba26653e53d240caa617e09e9ab
Author: Adam Belay <abelay@novell.com>
Date: Tue Aug 21 18:25:58 2007 -0400
CPUIDLE: max_cstate fix
Currently max_cstate is limited to 0, resulting in no idle processor
power management on ACPI platforms. This patch restores the value to
the array size.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 1fdc0887286179b40ce24bcdbde663172e205ef0
Author: Adam Belay <abelay@novell.com>
Date: Tue Aug 21 18:25:40 2007 -0400
CPUIDLE: handle BM detection inside the ACPI Processor driver
Update the ACPI processor driver to detect BM activity and
limit state entry depth internally, rather than exposing such
requirements to CPUIDLE. As a result, CPUIDLE can drop this
ACPI-specific interface and become more platform independent. BM
activity is now handled much more aggressively than it was in the
original implementation, so some testing coverage may be needed to
verify that this doesn't introduce any DMA buffer under-run issues.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 0ef38840db666f48e3cdd2b769da676c57228dd9
Author: Adam Belay <abelay@novell.com>
Date: Tue Aug 21 18:25:14 2007 -0400
CPUIDLE: menu governor updates
Tweak the menu governor to more effectively handle non-timer
break events. Non-timer break events are detected by comparing the
actual sleep time to the expected sleep time. In future revisions, it
may be more reliable to use the timer data structures directly.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit bb4d74fca63fa96cf3ace644b15ae0f12b7df5a1
Author: Adam Belay <abelay@novell.com>
Date: Tue Aug 21 18:24:40 2007 -0400
CPUIDLE: fix 'current_governor' sysfs entry
Allow the "current_governor" sysfs entry to properly handle
input terminated with '\n'.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit df3c71559bb69b125f1a48971bf0d17f78bbdf47
Author: Len Brown <len.brown@intel.com>
Date: Sun Aug 12 02:00:45 2007 -0400
cpuidle: fix IA64 build (again)
Signed-off-by: Len Brown <len.brown@intel.com>
commit a02064579e3f9530fd31baae16b1fc46b5a7bca8
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Sun Aug 12 01:39:27 2007 -0400
cpuidle: Remove support for runtime changing of max_cstate
Remove support for runtime changeability of max_cstate. Drivers can use
use latency APIs.
max_cstate can still be used as a boot time option and dmi override.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 0912a44b13adf22f5e3f607d263aed23b4910d7e
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Sun Aug 12 01:39:16 2007 -0400
cpuidle: Remove ACPI cstate_limit calls from ipw2100
ipw2100 already has code to use accetable_latency interfaces to limit the
C-state. Remove the calls to acpi_set_cstate_limit and acpi_get_cstate_limit
as they are redundant.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit c649a76e76be6bff1fd770d0a775798813a3f6e0
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Sun Aug 12 01:35:39 2007 -0400
cpuidle: compile fix for pause and resume functions
Fix the compilation failure when cpuidle is not compiled in.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Acked-by: Adam Belay <adam.belay@novell.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 2305a5920fb8ee6ccec1c62ade05aa8351091d71
Author: Adam Belay <abelay@novell.com>
Date: Thu Jul 19 00:49:00 2007 -0400
cpuidle: re-write
Some portions have been rewritten to make the code cleaner and lighter
weight. The following is a list of changes:
1.) the state name is now included in the sysfs interface
2.) detection, hotplug, and available state modifications are handled by
CPUIDLE drivers directly
3.) the CPUIDLE idle handler is only ever installed when at least one
cpuidle_device is enabled and ready
4.) the menu governor BM code no longer overflows
5.) the sysfs attributes are now printed as unsigned integers, avoiding
negative values
6.) a variety of other small cleanups
Also, Idle drivers are no longer swappable during runtime through the
CPUIDLE sysfs inteface. On i386 and x86_64 most idle handlers (e.g.
poll, mwait, halt, etc.) don't benefit from an infrastructure that
supports multiple states, so I think using a more general case idle
handler selection mechanism would be cleaner.
Signed-off-by: Adam Belay <abelay@novell.com>
Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Acked-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit df25b6b56955714e6e24b574d88d1fd11f0c3ee5
Author: Len Brown <len.brown@intel.com>
Date: Tue Jul 24 17:08:21 2007 -0400
cpuidle: fix IA64 buid
Signed-off-by: Len Brown <len.brown@intel.com>
commit fd6ada4c14488755ff7068860078c437431fbccd
Author: Adrian Bunk <bunk@stusta.de>
Date: Mon Jul 9 11:33:13 2007 -0700
cpuidle: static
make cpuidle_replace_governor() static
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit c1d4a2cebcadf2429c0c72e1d29aa2a9684c32e0
Author: Adrian Bunk <bunk@stusta.de>
Date: Tue Jul 3 00:54:40 2007 -0400
cpuidle: static
This patch makes the needlessly global struct menu_governor static.
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit dbf8780c6e8d572c2c273da97ed1cca7608fd999
Author: Andrew Morton <akpm@linux-foundation.org>
Date: Tue Jul 3 00:49:14 2007 -0400
export symbol tick_nohz_get_sleep_length
ERROR: "tick_nohz_get_sleep_length" [drivers/cpuidle/governors/menu.ko] undefined!
ERROR: "tick_nohz_get_idle_jiffies" [drivers/cpuidle/governors/menu.ko] undefined!
And please be sure to get your changes to core kernel suitably reviewed.
Cc: Adam Belay <abelay@novell.com>
Cc: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 29f0e248e7017be15f99febf9143a2cef00b2961
Author: Andrew Morton <akpm@linux-foundation.org>
Date: Tue Jul 3 00:43:04 2007 -0400
tick.h needs hrtimer.h
It uses hrtimers.
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit e40cede7d63a029e92712a3fe02faee60cc38fb4
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:40:34 2007 -0400
cpuidle: first round of documentation updates
Documentation changes based on Pavel's feedback.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 83b42be2efece386976507555c29e7773a0dfcd1
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:39:25 2007 -0400
cpuidle: add rating to the governors and pick the one with highest rating by default
Introduce a governor rating scheme to pick the right governor by default.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit d2a74b8c5e8f22def4709330d4bfc4a29209b71c
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:38:08 2007 -0400
cpuidle: make cpuidle sysfs driver governor switch off by default
Make default cpuidle sysfs to show current_governor and current_driver in
read-only mode. More elaborate available_governors and available_drivers with
writeable current_governor and current_driver interface only appear with
"cpuidle_sysfs_switch" boot parameter.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 1f60a0e80bf83cf6b55c8845bbe5596ed8f6307b
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:37:00 2007 -0400
cpuidle: menu governor: change the early break condition
Change the C-state early break out algorithm in menu governor.
We only look at early breakouts that result in wakeups shorter than idle
state's target_residency. If such a breakout is frequent enough, eliminate
the particular idle state upto a timeout period.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 45a42095cf64b003b4a69be3ce7f434f97d7af51
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:35:38 2007 -0400
cpuidle: fix uninitialized variable in sysfs routine
Fix the uninitialized usage of ret.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 80dca7cdba3e6ee13eae277660873ab9584eb3be
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:34:16 2007 -0400
cpuidle: reenable /proc/acpi//power interface for the time being
Keep /proc/acpi/processor/CPU*/power around for a while as powertop depends
on it. It will be marked deprecated and removed in future. powertop can use
cpuidle interfaces instead.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 589c37c2646c5e3813a51255a5ee1159cb4c33fc
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Jul 3 00:32:37 2007 -0400
cpuidle: menu governor and hrtimer compile fix
Compile fix for menu governor.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 0ba80bd9ab3ed304cb4f19b722e4cc6740588b5e
Author: Len Brown <len.brown@intel.com>
Date: Thu May 31 22:51:43 2007 -0400
cpuidle: build fix - cpuidle vs ipw2100 module
ERROR: "acpi_set_cstate_limit" [drivers/net/wireless/ipw2100.ko] undefined!
Signed-off-by: Len Brown <len.brown@intel.com>
commit d7d8fa7f96a7f7682be7c6cc0cc53fa7a18c3b58
Author: Adam Belay <abelay@novell.com>
Date: Sat Mar 24 03:47:07 2007 -0400
cpuidle: add the 'menu' governor
Here is my first take at implementing an idle PM governor that takes
full advantage of NO_HZ. I call it the 'menu' governor because it
considers the full list of idle states before each entry.
I've kept the implementation fairly simple. It attempts to guess the
next residency time and then chooses a state that would meet at least
the break-even point between power savings and entry cost. To this end,
it selects the deepest idle state that satisfies the following
constraints:
1. If the idle time elapsed since bus master activity was detected
is below a threshold (currently 20 ms), then limit the selection
to C2-type or above.
2. Do not choose a state with a break-even residency that exceeds
the expected time remaining until the next timer interrupt.
3. Do not choose a state with a break-even residency that exceeds
the elapsed time between the last pair of break events,
excluding timer interrupts.
This governor has an advantage over "ladder" governor because it
proactively checks how much time remains until the next timer interrupt
using the tick infrastructure. Also, it handles device interrupt
activity more intelligently by not including timer interrupts in break
event calculations. Finally, it doesn't make policy decisions using the
number of state entries, which can have variable residency times (NO_HZ
makes these potentially very large), and instead only considers sleep
time deltas.
The menu governor can be selected during runtime using the cpuidle sysfs
interface like so:
"echo "menu" > /sys/devices/system/cpu/cpuidle/current_governor"
Signed-off-by: Adam Belay <abelay@novell.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit a4bec7e65aa3b7488b879d971651cc99a6c410fe
Author: Adam Belay <abelay@novell.com>
Date: Sat Mar 24 03:47:03 2007 -0400
cpuidle: export time until next timer interrupt using NO_HZ
Expose information about the time remaining until the next
timer interrupt expires by utilizing the dynticks infrastructure.
Also modify the main idle loop to allow dynticks to handle
non-interrupt break events (e.g. DMA). Finally, expose sleep ticks
information to external code. Thomas Gleixner is responsible for much
of the code in this patch. However, I've made some additional changes,
so I'm probably responsible if there are any bugs or oversights :)
Signed-off-by: Adam Belay <abelay@novell.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 2929d8996fbc77f41a5ff86bb67cdde3ca7d2d72
Author: Adam Belay <abelay@novell.com>
Date: Sat Mar 24 03:46:58 2007 -0400
cpuidle: governor API changes
This patch prepares cpuidle for the menu governor. It adds an optional
stage after idle state entry to give the governor an opportunity to
check why the state was exited. Also it makes sure the idle loop
returns after each state entry, allowing the appropriate dynticks code
to run.
Signed-off-by: Adam Belay <abelay@novell.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 3a7fd42f9825c3b03e364ca59baa751bb350775f
Author: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Date: Thu Apr 26 00:03:59 2007 -0700
cpuidle: hang fix
Prevent hang on x86-64, when ACPI processor driver is added as a module on
a system that does not support C-states.
x86-64 expects all idle handlers to enable interrupts before returning from
idle handler. This is due to enter_idle(), exit_idle() races. Make
cpuidle_idle_call() confirm to this when there is no pm_idle_old.
Also, cpuidle look at the return values of attch_driver() and set
current_driver to NULL if attach fails on all CPUs.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 4893339a142afbd5b7c01ffadfd53d14746e858e
Author: Shaohua Li <shaohua.li@intel.com>
Date: Thu Apr 26 10:40:09 2007 +0800
cpuidle: add support for max_cstate limit
With CPUIDLE framework, the max_cstate (to limit max cpu c-state)
parameter is ingored. Some systems require it to ignore C2/C3
and some drivers like ipw require it too.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 43bbbbe1cb998cbd2df656f55bb3bfe30f30e7d1
Author: Shaohua Li <shaohua.li@intel.com>
Date: Thu Apr 26 10:40:13 2007 +0800
cpuidle: add cpuidle_fore_redetect_devices API
add cpuidle_force_redetect_devices API,
which forces all CPU redetect idle states.
Next patch will use it.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit d1edadd608f24836def5ec483d2edccfb37b1d19
Author: Shaohua Li <shaohua.li@intel.com>
Date: Thu Apr 26 10:40:01 2007 +0800
cpuidle: fix sysfs related issue
Fix the cpuidle sysfs issue.
a. make kobject dynamicaly allocated
b. fixed sysfs init issue to avoid suspend/resume issue
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 7169a5cc0d67b263978859672e86c13c23a5570d
Author: Randy Dunlap <randy.dunlap@oracle.com>
Date: Wed Mar 28 22:52:53 2007 -0400
cpuidle: 1-bit field must be unsigned
A 1-bit bitfield has no room for a sign bit.
drivers/cpuidle/governors/ladder.c:54:16: error: dubious bitfield without explicit `signed' or `unsigned'
Signed-off-by: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 4658620158dc2fbd9e4bcb213c5b6fb5d05ba7d4
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Wed Mar 28 22:52:41 2007 -0400
cpuidle: fix boot hang
Patch for cpuidle boot hang reported by Larry Finger here.
http://www.ussg.iu.edu/hypermail/linux/kernel/0703.2/2025.html
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Larry Finger <larry.finger@lwfinger.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit c17e168aa6e5fe3851baaae8df2fbc1cf11443a9
Author: Len Brown <len.brown@intel.com>
Date: Wed Mar 7 04:37:53 2007 -0500
cpuidle: ladder does not depend on ACPI
build fix for CONFIG_ACPI=n
In file included from drivers/cpuidle/governors/ladder.c:21:
include/acpi/processor.h:88: error: expected specifier-qualifier-list before âacpi_integerâ
include/acpi/processor.h:106: error: expected specifier-qualifier-list before âacpi_integerâ
include/acpi/processor.h:168: error: expected specifier-qualifier-list before âacpi_handleâ
Signed-off-by: Len Brown <len.brown@intel.com>
commit 8c91d958246bde68db0c3f0c57b535962ce861cb
Author: Adrian Bunk <bunk@stusta.de>
Date: Tue Mar 6 02:29:40 2007 -0800
cpuidle: make code static
This patch makes the following needlessly global code static:
- driver.c: __cpuidle_find_driver()
- governor.c: __cpuidle_find_governor()
- ladder.c: struct ladder_governor
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Adam Belay <abelay@novell.com>
Cc: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 0c39dc3187094c72c33ab65a64d2017b21f372d2
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Wed Mar 7 02:38:22 2007 -0500
cpu_idle: fix build break
This patch fixes a build breakage with !CONFIG_HOTPLUG_CPU and
CONFIG_CPU_IDLE.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 8112e3b115659b07df340ef170515799c0105f82
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Tue Mar 6 02:29:39 2007 -0800
cpuidle: build fix for !CPU_IDLE
Fix the compile issues when CPU_IDLE is not configured.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Adam Belay <abelay@novell.com>
Cc: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 1eb4431e9599cd25e0d9872f3c2c8986821839dd
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Thu Feb 22 13:54:57 2007 -0800
cpuidle take2: Basic documentation for cpuidle
Documentation for cpuidle infrastructure
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Adam Belay <abelay@novell.com>
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit ef5f15a8b79123a047285ec2e3899108661df779
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Thu Feb 22 13:54:03 2007 -0800
cpuidle take2: Hookup ACPI C-states driver with cpuidle
Hookup ACPI C-states onto generic cpuidle infrastructure.
drivers/acpi/procesor_idle.c is now a ACPI C-states driver that registers as
a driver in cpuidle infrastructure and the policy part is removed from
drivers/acpi/processor_idle.c. We use governor in cpuidle instead.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Adam Belay <abelay@novell.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 987196fa82d4db52c407e8c9d5dec884ba602183
Author: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Date: Thu Feb 22 13:52:57 2007 -0800
cpuidle take2: Core cpuidle infrastructure
Announcing 'cpuidle', a new CPU power management infrastructure to manage
idle CPUs in a clean and efficient manner.
cpuidle separates out the drivers that can provide support for multiple types
of idle states and policy governors that decide on what idle state to use
at run time.
A cpuidle driver can support multiple idle states based on parameters like
varying power consumption, wakeup latency, etc (ACPI C-states for example).
A cpuidle governor can be usage model specific (laptop, server,
laptop on battery etc).
Main advantage of the infrastructure being, it allows independent development
of drivers and governors and allows for better CPU power management.
A huge thanks to Adam Belay and Shaohua Li who were part of this mini-project
since its beginning and are greatly responsible for this patchset.
This patch:
Core cpuidle infrastructure.
Introduces a new abstraction layer for cpuidle:
* which manages drivers that can support multiple idles states. Drivers
can be generic or particular to specific hardware/platform
* allows pluging in multiple policy governors that can take idle state policy
decision
* The core also has a set of sysfs interfaces with which administrato can know
about supported drivers and governors and switch them at run time.
Signed-off-by: Adam Belay <abelay@novell.com>
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
2007-10-04 06:58:00 +08:00
|
|
|
|
|
|
|
return ts->sleep_length;
|
|
|
|
}
|
|
|
|
|
2017-12-21 09:22:45 +08:00
|
|
|
/**
|
|
|
|
* tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
|
|
|
|
* for a particular CPU.
|
|
|
|
*
|
|
|
|
* Called from the schedutil frequency scaling governor in scheduler context.
|
|
|
|
*/
|
|
|
|
unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
|
|
|
|
{
|
|
|
|
struct tick_sched *ts = tick_get_tick_sched(cpu);
|
|
|
|
|
|
|
|
return ts->idle_calls;
|
|
|
|
}
|
|
|
|
|
cpufreq: schedutil: Avoid reducing frequency of busy CPUs prematurely
The way the schedutil governor uses the PELT metric causes it to
underestimate the CPU utilization in some cases.
That can be easily demonstrated by running kernel compilation on
a Sandy Bridge Intel processor, running turbostat in parallel with
it and looking at the values written to the MSR_IA32_PERF_CTL
register. Namely, the expected result would be that when all CPUs
were 100% busy, all of them would be requested to run in the maximum
P-state, but observation shows that this clearly isn't the case.
The CPUs run in the maximum P-state for a while and then are
requested to run slower and go back to the maximum P-state after
a while again. That causes the actual frequency of the processor to
visibly oscillate below the sustainable maximum in a jittery fashion
which clearly is not desirable.
That has been attributed to CPU utilization metric updates on task
migration that cause the total utilization value for the CPU to be
reduced by the utilization of the migrated task. If that happens,
the schedutil governor may see a CPU utilization reduction and will
attempt to reduce the CPU frequency accordingly right away. That
may be premature, though, for example if the system is generally
busy and there are other runnable tasks waiting to be run on that
CPU already.
This is unlikely to be an issue on systems where cpufreq policies are
shared between multiple CPUs, because in those cases the policy
utilization is computed as the maximum of the CPU utilization values
over the whole policy and if that turns out to be low, reducing the
frequency for the policy most likely is a good idea anyway. On
systems with one CPU per policy, however, it may affect performance
adversely and even lead to increased energy consumption in some cases.
On those systems it may be addressed by taking another utilization
metric into consideration, like whether or not the CPU whose
frequency is about to be reduced has been idle recently, because if
that's not the case, the CPU is likely to be busy in the near future
and its frequency should not be reduced.
To that end, use the counter of idle calls in the timekeeping code.
Namely, make the schedutil governor look at that counter for the
current CPU every time before its frequency is about to be reduced.
If the counter has not changed since the previous iteration of the
governor computations for that CPU, the CPU has been busy for all
that time and its frequency should not be decreased, so if the new
frequency would be lower than the one set previously, the governor
will skip the frequency update.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Reviewed-by: Joel Fernandes <joelaf@google.com>
2017-03-22 07:08:50 +08:00
|
|
|
/**
|
|
|
|
* tick_nohz_get_idle_calls - return the current idle calls counter value
|
|
|
|
*
|
|
|
|
* Called from the schedutil frequency scaling governor in scheduler context.
|
|
|
|
*/
|
|
|
|
unsigned long tick_nohz_get_idle_calls(void)
|
|
|
|
{
|
|
|
|
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
|
|
|
|
|
|
|
|
return ts->idle_calls;
|
|
|
|
}
|
|
|
|
|
2011-07-28 10:00:47 +08:00
|
|
|
static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
|
|
|
|
{
|
2012-07-17 00:00:34 +08:00
|
|
|
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
|
2011-07-28 10:00:47 +08:00
|
|
|
unsigned long ticks;
|
2012-07-17 00:00:34 +08:00
|
|
|
|
2015-11-19 23:47:32 +08:00
|
|
|
if (vtime_accounting_cpu_enabled())
|
2012-07-17 00:00:34 +08:00
|
|
|
return;
|
2007-02-16 17:28:03 +08:00
|
|
|
/*
|
|
|
|
* We stopped the tick in idle. Update process times would miss the
|
|
|
|
* time we slept as update_process_times does only a 1 tick
|
|
|
|
* accounting. Enforce that this is accounted to idle !
|
|
|
|
*/
|
|
|
|
ticks = jiffies - ts->idle_jiffies;
|
|
|
|
/*
|
|
|
|
* We might be one off. Do not randomly account a huge number of ticks!
|
|
|
|
*/
|
2008-12-31 22:11:38 +08:00
|
|
|
if (ticks && ticks < LONG_MAX)
|
|
|
|
account_idle_ticks(ticks);
|
|
|
|
#endif
|
2011-07-27 23:29:28 +08:00
|
|
|
}
|
|
|
|
|
2007-02-16 17:28:03 +08:00
|
|
|
/**
|
2011-10-08 00:22:06 +08:00
|
|
|
* tick_nohz_idle_exit - restart the idle tick from the idle task
|
2007-02-16 17:28:03 +08:00
|
|
|
*
|
|
|
|
* Restart the idle tick when the CPU is woken up from idle
|
2011-10-08 00:22:06 +08:00
|
|
|
* This also exit the RCU extended quiescent state. The CPU
|
|
|
|
* can use RCU again after this function is called.
|
2007-02-16 17:28:03 +08:00
|
|
|
*/
|
2011-10-08 00:22:06 +08:00
|
|
|
void tick_nohz_idle_exit(void)
|
2007-02-16 17:28:03 +08:00
|
|
|
{
|
2014-08-18 01:30:27 +08:00
|
|
|
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
|
2008-01-30 20:30:04 +08:00
|
|
|
ktime_t now;
|
2007-02-16 17:28:03 +08:00
|
|
|
|
2008-01-30 20:30:04 +08:00
|
|
|
local_irq_disable();
|
2011-10-08 22:01:00 +08:00
|
|
|
|
2012-01-25 01:59:43 +08:00
|
|
|
WARN_ON_ONCE(!ts->inidle);
|
|
|
|
|
|
|
|
ts->inidle = 0;
|
|
|
|
|
|
|
|
if (ts->idle_active || ts->tick_stopped)
|
2009-09-29 20:25:15 +08:00
|
|
|
now = ktime_get();
|
|
|
|
|
|
|
|
if (ts->idle_active)
|
2013-08-08 04:28:01 +08:00
|
|
|
tick_nohz_stop_idle(ts, now);
|
2008-01-30 20:30:04 +08:00
|
|
|
|
2011-07-28 10:00:47 +08:00
|
|
|
if (ts->tick_stopped) {
|
2016-04-13 21:56:51 +08:00
|
|
|
tick_nohz_restart_sched_tick(ts, now);
|
2011-07-28 10:00:47 +08:00
|
|
|
tick_nohz_account_idle_ticks(ts);
|
2008-01-30 20:30:04 +08:00
|
|
|
}
|
2007-02-16 17:28:03 +08:00
|
|
|
|
|
|
|
local_irq_enable();
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The nohz low res interrupt handler
|
|
|
|
*/
|
|
|
|
static void tick_nohz_handler(struct clock_event_device *dev)
|
|
|
|
{
|
2014-08-18 01:30:25 +08:00
|
|
|
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
|
2007-02-16 17:28:03 +08:00
|
|
|
struct pt_regs *regs = get_irq_regs();
|
|
|
|
ktime_t now = ktime_get();
|
|
|
|
|
2016-12-25 18:38:40 +08:00
|
|
|
dev->next_event = KTIME_MAX;
|
2007-02-16 17:28:03 +08:00
|
|
|
|
2012-10-15 08:03:27 +08:00
|
|
|
tick_sched_do_timer(now);
|
2012-10-15 08:43:03 +08:00
|
|
|
tick_sched_handle(ts, regs);
|
2007-02-16 17:28:03 +08:00
|
|
|
|
2014-06-12 18:54:41 +08:00
|
|
|
/* No need to reprogram if we are running tickless */
|
|
|
|
if (unlikely(ts->tick_stopped))
|
|
|
|
return;
|
|
|
|
|
2015-04-15 05:08:54 +08:00
|
|
|
hrtimer_forward(&ts->sched_timer, now, tick_period);
|
|
|
|
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
|
2007-02-16 17:28:03 +08:00
|
|
|
}
|
|
|
|
|
2015-05-27 06:50:33 +08:00
|
|
|
static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
|
|
|
|
{
|
|
|
|
if (!tick_nohz_enabled)
|
|
|
|
return;
|
|
|
|
ts->nohz_mode = mode;
|
|
|
|
/* One update is enough */
|
|
|
|
if (!test_and_set_bit(0, &tick_nohz_active))
|
2018-01-15 06:30:51 +08:00
|
|
|
timers_update_nohz();
|
2015-05-27 06:50:33 +08:00
|
|
|
}
|
|
|
|
|
2007-02-16 17:28:03 +08:00
|
|
|
/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	/* Switch the clock event device to oneshot; bail out if it can't */
	if (tick_switch_to_oneshot(tick_nohz_handler))
		return;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	/* Get the next period */
	next = tick_init_jiffy_update();

	hrtimer_set_expires(&ts->sched_timer, next);
	/* Make sure the expiry lies in the future before programming it */
	hrtimer_forward_now(&ts->sched_timer, tick_period);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}
|
|
|
|
|
2013-12-05 01:28:20 +08:00
|
|
|
static inline void tick_nohz_irq_enter(void)
|
2009-09-29 20:25:15 +08:00
|
|
|
{
|
2014-08-18 01:30:27 +08:00
|
|
|
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
|
2009-09-29 20:25:15 +08:00
|
|
|
ktime_t now;
|
|
|
|
|
|
|
|
if (!ts->idle_active && !ts->tick_stopped)
|
|
|
|
return;
|
|
|
|
now = ktime_get();
|
|
|
|
if (ts->idle_active)
|
2013-08-08 04:28:01 +08:00
|
|
|
tick_nohz_stop_idle(ts, now);
|
2016-07-04 17:50:35 +08:00
|
|
|
if (ts->tick_stopped)
|
2009-09-29 20:25:15 +08:00
|
|
|
tick_nohz_update_jiffies(now);
|
|
|
|
}
|
|
|
|
|
2007-02-16 17:28:03 +08:00
|
|
|
#else

/* CONFIG_NO_HZ_COMMON=n: the nohz entry points compile away to no-ops. */
static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_nohz_irq_enter(void) { }
static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { }

#endif /* CONFIG_NO_HZ_COMMON */
|
2007-02-16 17:28:03 +08:00
|
|
|
|
2008-10-17 15:59:47 +08:00
|
|
|
/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_irq_enter(void)
{
	/* Undo a pending oneshot broadcast wakeup on this CPU, if any */
	tick_check_oneshot_broadcast_this_cpu();
	/* Account idle time / update jiffies if the tick was stopped */
	tick_nohz_irq_enter();
}
|
|
|
|
|
2007-02-16 17:28:03 +08:00
|
|
|
/*
|
|
|
|
* High resolution timer specific code
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_HIGH_RES_TIMERS
|
|
|
|
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	tick_sched_do_timer(now);

	/*
	 * Do not call, when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs)
		tick_sched_handle(ts, regs);
	else
		/* Skipped the handler: drop the cached next-tick value so
		 * the next reprogramming is not wrongly elided */
		ts->next_tick = 0;

	/* No need to reprogram if we are in idle or full dynticks mode */
	if (unlikely(ts->tick_stopped))
		return HRTIMER_NORESTART;

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}
|
|
|
|
|
2012-05-08 18:20:58 +08:00
|
|
|
/* Non-zero: stagger the per-CPU tick timers (see tick_setup_sched_timer()) */
static int sched_skew_tick;

/* Parse the "skew_tick=" early boot parameter */
static int __init skew_tick(char *str)
{
	get_option(&str, &sched_skew_tick);

	return 0;
}
early_param("skew_tick", skew_tick);
|
|
|
|
|
2007-02-16 17:28:03 +08:00
|
|
|
/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now = ktime_get();

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per-CPU) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

	/* Offset the tick to avert jiffies_lock contention. */
	if (sched_skew_tick) {
		/* Spread CPUs over half a tick period:
		 * offset = cpu_id * (tick_period / 2 / nr_possible_cpus) */
		u64 offset = ktime_to_ns(tick_period) >> 1;
		do_div(offset, num_possible_cpus());
		offset *= smp_processor_id();
		hrtimer_add_expires_ns(&ts->sched_timer, offset);
	}

	/* Move the expiry past 'now' and start the pinned per-CPU timer */
	hrtimer_forward(&ts->sched_timer, now, tick_period);
	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
	tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
|
2008-08-21 07:37:38 +08:00
|
|
|
#endif /* HIGH_RES_TIMERS */
|
2007-02-16 17:28:03 +08:00
|
|
|
|
2011-08-11 05:21:01 +08:00
|
|
|
#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
|
2007-02-16 17:28:03 +08:00
|
|
|
void tick_cancel_sched_timer(int cpu)
|
|
|
|
{
|
|
|
|
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
|
|
|
|
|
2008-08-21 07:37:38 +08:00
|
|
|
# ifdef CONFIG_HIGH_RES_TIMERS
|
2007-02-16 17:28:03 +08:00
|
|
|
if (ts->sched_timer.base)
|
|
|
|
hrtimer_cancel(&ts->sched_timer);
|
2008-08-21 07:37:38 +08:00
|
|
|
# endif
|
2008-03-05 06:59:55 +08:00
|
|
|
|
2013-05-03 21:02:50 +08:00
|
|
|
memset(ts, 0, sizeof(*ts));
|
2007-02-16 17:28:03 +08:00
|
|
|
}
|
2008-08-21 07:37:38 +08:00
|
|
|
#endif
|
2007-02-16 17:28:03 +08:00
|
|
|
|
|
|
|
/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	/* Ask every CPU to re-evaluate oneshot capability on its next check */
	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}
|
|
|
|
|
|
|
|
/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	/* Flag only this CPU; picked up by tick_check_oneshot_change() */
	set_bit(0, &ts->check_clocks);
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Check, if a change happened, which makes oneshot possible.
|
|
|
|
*
|
|
|
|
* Called cyclic from the hrtimer softirq (driven by the timer
|
|
|
|
* softirq) allow_nohz signals, that we can switch into low-res nohz
|
|
|
|
* mode, because high resolution timers are disabled (either compile
|
2015-05-07 20:35:59 +08:00
|
|
|
* or runtime). Called with interrupts disabled.
|
2007-02-16 17:28:03 +08:00
|
|
|
*/
|
|
|
|
int tick_check_oneshot_change(int allow_nohz)
|
|
|
|
{
|
2014-08-18 01:30:25 +08:00
|
|
|
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
|
2007-02-16 17:28:03 +08:00
|
|
|
|
|
|
|
if (!test_and_clear_bit(0, &ts->check_clocks))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
|
|
|
|
return 0;
|
|
|
|
|
2008-02-08 20:19:24 +08:00
|
|
|
if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
|
2007-02-16 17:28:03 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!allow_nohz)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
tick_nohz_switch_to_nohz();
|
|
|
|
return 0;
|
|
|
|
}
|