nohz: Rename CONFIG_NO_HZ to CONFIG_NO_HZ_COMMON
We are planning to convert the dynticks Kconfig options layout into a choice menu. The user must be able to easily pick any of the following implementations: constant periodic tick, idle dynticks, full dynticks.

As this implies a mutual exclusion, the two dynticks implementations need to converge on the selection of a common Kconfig option in order to ease the sharing of a common infrastructure.

It would thus seem pretty natural to reuse CONFIG_NO_HZ to that end: it already implements all the idle dynticks code, and full dynticks depends on all that code for now. So ideally the choice menu would propose CONFIG_NO_HZ_IDLE and CONFIG_NO_HZ_EXTENDED, and both would select CONFIG_NO_HZ.

On the other hand we want to stay backward compatible: if CONFIG_NO_HZ is set in an older config file, we want to enable CONFIG_NO_HZ_IDLE by default.

But we can't afford both at the same time or we run into a circular dependency:

1) CONFIG_NO_HZ_IDLE and CONFIG_NO_HZ_EXTENDED both select CONFIG_NO_HZ
2) If CONFIG_NO_HZ is set, we default to CONFIG_NO_HZ_IDLE

We might be able to support that from Kconfig/Kbuild, but it may not be wise to introduce such a confusing behaviour.

So to solve this, create a new CONFIG_NO_HZ_COMMON option which gathers the common code between idle and full dynticks (that common code is, for now, simply the idle dynticks code) and select it from their respective Kconfig entries. We'll later create CONFIG_NO_HZ_IDLE and map CONFIG_NO_HZ to it for backward compatibility.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Gilad Ben Yossef <gilad@benyossef.com>
Cc: Hakan Akkan <hakanakkan@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Kevin Hilman <khilman@linaro.org>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Namhyung Kim <namhyung.kim@lge.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
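[Editor's note: for illustration only, here is a rough Kconfig sketch of the choice-menu layout this series is heading towards, with both dynticks flavours selecting the shared NO_HZ_COMMON option. The CONFIG_NO_HZ_IDLE and HZ_PERIODIC entries do not exist yet at this point; their names and the exact wording are assumptions, not part of this patch.]

config NO_HZ_COMMON
	bool
	depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
	select TICK_ONESHOT

choice
	prompt "Timer tick handling"
	default NO_HZ_IDLE if NO_HZ

config HZ_PERIODIC
	bool "Periodic timer ticks (constant rate, no dynticks)"

config NO_HZ_IDLE
	bool "Idle dynticks system (tickless idle)"
	select NO_HZ_COMMON

config NO_HZ_EXTENDED
	bool "Full dynticks system"
	select NO_HZ_COMMON

endchoice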
commit 3451d0243c
parent ab71d36ddb
@@ -176,7 +176,7 @@ o A CPU-bound real-time task in a CONFIG_PREEMPT_RT kernel that
 o	A hardware or software issue shuts off the scheduler-clock
 	interrupt on a CPU that is not in dyntick-idle mode. This
 	problem really has happened, and seems to be most likely to
-	result in RCU CPU stall warnings for CONFIG_NO_HZ=n kernels.
+	result in RCU CPU stall warnings for CONFIG_NO_HZ_COMMON=n kernels.
 
 o	A bug in the RCU implementation.
 
@@ -131,8 +131,8 @@ sampling_rate_min:
 The sampling rate is limited by the HW transition latency:
 transition_latency * 100
 Or by kernel restrictions:
-If CONFIG_NO_HZ is set, the limit is 10ms fixed.
-If CONFIG_NO_HZ is not set or nohz=off boot parameter is used, the
+If CONFIG_NO_HZ_COMMON is set, the limit is 10ms fixed.
+If CONFIG_NO_HZ_COMMON is not set or nohz=off boot parameter is used, the
 limits depend on the CONFIG_HZ option:
 HZ=1000: min=20000us (20ms)
 HZ=250: min=80000us (80ms)
@@ -30,8 +30,8 @@ DEFINE(UM_NSEC_PER_USEC, NSEC_PER_USEC);
 #ifdef CONFIG_PRINTK
 DEFINE(UML_CONFIG_PRINTK, CONFIG_PRINTK);
 #endif
-#ifdef CONFIG_NO_HZ
-DEFINE(UML_CONFIG_NO_HZ, CONFIG_NO_HZ);
+#ifdef CONFIG_NO_HZ_COMMON
+DEFINE(UML_CONFIG_NO_HZ_COMMON, CONFIG_NO_HZ_COMMON);
 #endif
 #ifdef CONFIG_UML_X86
 DEFINE(UML_CONFIG_UML_X86, CONFIG_UML_X86);
@@ -79,7 +79,7 @@ long long os_nsecs(void)
 	return timeval_to_ns(&tv);
 }
 
-#ifdef UML_CONFIG_NO_HZ
+#ifdef UML_CONFIG_NO_HZ_COMMON
 static int after_sleep_interval(struct timespec *ts)
 {
 	return 0;
@@ -230,7 +230,7 @@ extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern int runqueue_is_locked(int cpu);
 
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 extern void nohz_balance_enter_idle(int cpu);
 extern void set_cpu_sd_state_idle(void);
 extern int get_nohz_timer_target(void);
@@ -1758,13 +1758,13 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
 }
 #endif
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 void calc_load_enter_idle(void);
 void calc_load_exit_idle(void);
 #else
 static inline void calc_load_enter_idle(void) { }
 static inline void calc_load_exit_idle(void) { }
-#endif /* CONFIG_NO_HZ */
+#endif /* CONFIG_NO_HZ_COMMON */
 
 #ifndef CONFIG_CPUMASK_OFFSTACK
 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
@@ -1850,7 +1850,7 @@ extern void idle_task_exit(void);
 static inline void idle_task_exit(void) {}
 #endif
 
-#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
+#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
 extern void wake_up_nohz_cpu(int cpu);
 #else
 static inline void wake_up_nohz_cpu(int cpu) { }
@@ -82,7 +82,7 @@ extern int tick_program_event(ktime_t expires, int force);
 extern void tick_setup_sched_timer(void);
 # endif
 
-# if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS
+# if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
 extern void tick_cancel_sched_timer(int cpu);
 # else
 static inline void tick_cancel_sched_timer(int cpu) { }
@@ -123,7 +123,7 @@ static inline void tick_check_idle(int cpu) { }
 static inline int tick_oneshot_mode_active(void) { return 0; }
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
-# ifdef CONFIG_NO_HZ
+# ifdef CONFIG_NO_HZ_COMMON
 DECLARE_PER_CPU(struct tick_sched, tick_cpu_sched);
 
 static inline int tick_nohz_tick_stopped(void)
@@ -138,7 +138,7 @@ extern ktime_t tick_nohz_get_sleep_length(void);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
 extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
 
-# else /* !CONFIG_NO_HZ */
+# else /* !CONFIG_NO_HZ_COMMON */
 static inline int tick_nohz_tick_stopped(void)
 {
 	return 0;
@@ -155,7 +155,7 @@ static inline ktime_t tick_nohz_get_sleep_length(void)
 }
 static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
 static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
-# endif /* !NO_HZ */
+# endif /* !CONFIG_NO_HZ_COMMON */
 
 #ifdef CONFIG_NO_HZ_EXTENDED
 extern int tick_nohz_extended_cpu(int cpu);
@@ -580,7 +580,7 @@ config RCU_FANOUT_EXACT
 
 config RCU_FAST_NO_HZ
 	bool "Accelerate last non-dyntick-idle CPU's grace periods"
-	depends on NO_HZ && SMP
+	depends on NO_HZ_COMMON && SMP
 	default n
 	help
 	  This option causes RCU to attempt to accelerate grace periods in
@@ -160,7 +160,7 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
  */
 static int hrtimer_get_target(int this_cpu, int pinned)
 {
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 	if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu))
 		return get_nohz_timer_target();
 #endif
@@ -1106,7 +1106,7 @@ ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
 }
 EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 /**
  * hrtimer_get_next_event - get the time until next expiry event
  *
@@ -549,7 +549,7 @@ void resched_cpu(int cpu)
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 /*
  * In the semi idle case, use the nearest busy cpu for migrating timers
  * from an idle cpu. This is good for power-savings.
@@ -641,14 +641,14 @@ static inline bool got_nohz_idle_kick(void)
 	return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
 }
 
-#else /* CONFIG_NO_HZ */
+#else /* CONFIG_NO_HZ_COMMON */
 
 static inline bool got_nohz_idle_kick(void)
 {
 	return false;
 }
 
-#endif /* CONFIG_NO_HZ */
+#endif /* CONFIG_NO_HZ_COMMON */
 
 void sched_avg_update(struct rq *rq)
 {
@@ -2139,7 +2139,7 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
 	return load >> FSHIFT;
 }
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 /*
  * Handle NO_HZ for the global load-average.
 *
@@ -2365,12 +2365,12 @@ static void calc_global_nohz(void)
 	smp_wmb();
 	calc_load_idx++;
 }
-#else /* !CONFIG_NO_HZ */
+#else /* !CONFIG_NO_HZ_COMMON */
 
 static inline long calc_load_fold_idle(void) { return 0; }
 static inline void calc_global_nohz(void) { }
 
-#endif /* CONFIG_NO_HZ */
+#endif /* CONFIG_NO_HZ_COMMON */
 
 /*
  * calc_load - update the avenrun load estimates 10 ticks after the
@@ -2530,7 +2530,7 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
 	sched_avg_update(this_rq);
 }
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 /*
  * There is no sane way to deal with nohz on smp when using jiffies because the
  * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
@@ -2590,7 +2590,7 @@ void update_cpu_load_nohz(void)
 	}
 	raw_spin_unlock(&this_rq->lock);
 }
-#endif /* CONFIG_NO_HZ */
+#endif /* CONFIG_NO_HZ_COMMON */
 
 /*
  * Called from scheduler_tick()
@@ -7023,7 +7023,7 @@ void __init sched_init(void)
 		INIT_LIST_HEAD(&rq->cfs_tasks);
 
 		rq_attach_root(rq, &def_root_domain);
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 		rq->nohz_flags = 0;
 #endif
 #endif
@@ -5331,7 +5331,7 @@ out_unlock:
 	return 0;
 }
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 /*
  * idle load balancing details
  * - When one of the busy CPUs notice that there may be an idle rebalancing
@@ -5541,9 +5541,9 @@ out:
 	rq->next_balance = next_balance;
 }
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 /*
- * In CONFIG_NO_HZ case, the idle balance kickee will do the
+ * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
  * rebalancing for all the cpus for whom scheduler ticks are stopped.
  */
 static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
@@ -5686,7 +5686,7 @@ void trigger_load_balance(struct rq *rq, int cpu)
 	if (time_after_eq(jiffies, rq->next_balance) &&
 	    likely(!on_null_domain(cpu)))
 		raise_softirq(SCHED_SOFTIRQ);
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 	if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
 		nohz_balancer_kick(cpu);
 #endif
@@ -6156,7 +6156,7 @@ __init void init_sched_fair_class(void)
 #ifdef CONFIG_SMP
 	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 	nohz.next_balance = jiffies;
 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
 	cpu_notifier(sched_ilb_notifier, 0);
@@ -404,7 +404,7 @@ struct rq {
 	#define CPU_LOAD_IDX_MAX 5
 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 	unsigned long last_load_update_tick;
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 	u64 nohz_stamp;
 	unsigned long nohz_flags;
 #endif
@@ -1333,7 +1333,7 @@ extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
 
 extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 enum rq_nohz_flag_bits {
 	NOHZ_TICK_STOPPED,
 	NOHZ_BALANCE_KICK,
@@ -348,7 +348,7 @@ void irq_exit(void)
 	if (!in_interrupt() && local_softirq_pending())
 		invoke_softirq();
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 	/* Make sure that timer wheel updates are propagated */
 	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
 		tick_nohz_irq_exit();
@@ -64,16 +64,21 @@ config GENERIC_CMOS_UPDATE
 if GENERIC_CLOCKEVENTS
 menu "Timers subsystem"
 
-# Core internal switch. Selected by NO_HZ / HIGH_RES_TIMERS. This is
+# Core internal switch. Selected by NO_HZ_COMMON / HIGH_RES_TIMERS. This is
 # only related to the tick functionality. Oneshot clockevent devices
 # are supported independ of this.
 config TICK_ONESHOT
 	bool
 
+config NO_HZ_COMMON
+	bool
+	depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
+	select TICK_ONESHOT
+
 config NO_HZ
 	bool "Tickless System (Dynamic Ticks)"
 	depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
-	select TICK_ONESHOT
+	select NO_HZ_COMMON
 	help
 	  This option enables a tickless system: timer interrupts will
 	  only trigger on an as-needed basis both when the system is
@@ -81,14 +86,14 @@ config NO_HZ
 
 config NO_HZ_EXTENDED
 	bool "Full dynticks system"
-	# NO_HZ dependency
+	# NO_HZ_COMMON dependency
 	depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
 	# RCU_USER_QS
 	depends on HAVE_CONTEXT_TRACKING && SMP
 	# RCU_NOCB_CPU dependency
 	depends on TREE_RCU || TREE_PREEMPT_RCU
 	depends on VIRT_CPU_ACCOUNTING_GEN
-	select NO_HZ
+	select NO_HZ_COMMON
 	select RCU_USER_QS
 	select RCU_NOCB_CPU
 	select CONTEXT_TRACKING_FORCE
@@ -104,7 +104,7 @@ static void tick_sched_do_timer(ktime_t now)
 {
 	int cpu = smp_processor_id();
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 	/*
 	 * Check if the do_timer duty was dropped. We don't care about
 	 * concurrency: This happens only when the cpu in charge went
@@ -124,7 +124,7 @@ static void tick_sched_do_timer(ktime_t now)
 
 static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 {
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 	/*
 	 * When we are idle and the tick is stopped, we have to touch
 	 * the watchdog as we might not schedule for a really long
@@ -235,7 +235,7 @@ core_initcall(init_tick_nohz_extended);
 /*
  * NOHZ - aka dynamic tick functionality
  */
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 /*
  * NO HZ enabled ?
  */
@@ -907,7 +907,7 @@ static inline void tick_check_nohz(int cpu)
 static inline void tick_nohz_switch_to_nohz(void) { }
 static inline void tick_check_nohz(int cpu) { }
 
-#endif /* NO_HZ */
+#endif /* CONFIG_NO_HZ_COMMON */
 
 /*
  * Called from irq_enter to notify about the possible interruption of idle()
@@ -992,14 +992,14 @@ void tick_setup_sched_timer(void)
 		now = ktime_get();
 	}
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 	if (tick_nohz_enabled)
 		ts->nohz_mode = NOHZ_MODE_HIGHRES;
 #endif
 }
 #endif /* HIGH_RES_TIMERS */
 
-#if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS
+#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
 void tick_cancel_sched_timer(int cpu)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
@@ -738,7 +738,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
 
 	cpu = smp_processor_id();
 
-#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
+#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
 	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
 		cpu = get_nohz_timer_target();
 #endif
@@ -1188,7 +1188,7 @@ static inline void __run_timers(struct tvec_base *base)
 	spin_unlock_irq(&base->lock);
 }
 
-#ifdef CONFIG_NO_HZ
+#ifdef CONFIG_NO_HZ_COMMON
 /*
  * Find out when the next timer event is due to happen. This
  * is used on S/390 to stop all activity when a CPU is idle.