Merge branch 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer updates from Ingo Molnar:
 "The main purpose is to fix a full dynticks bug related to
  virtualization, where steal time accounting appears to be zero in
  /proc/stat even after a few seconds of competing guests running busy
  loops on the same host CPU.  It's not a regression though, as it has
  been there since the beginning.

  The other commits are preparatory work to fix the bug and various
  cleanups"

* 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  arch: Remove stub cputime.h headers
  sched: Remove needless round trip nsecs <-> tick conversion of steal time
  cputime: Fix jiffies based cputime assumption on steal accounting
  cputime: Bring cputime -> nsecs conversion
  cputime: Default implementation of nsecs -> cputime conversion
  cputime: Fix nsecs_to_cputime() return type cast
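For context, a minimal userspace sketch (not kernel code; the HZ value and the printf framing are illustrative assumptions) of the unit mismatch this series fixes: steal time is measured in nanoseconds, but the old steal_account_process_tick() handed a tick count to account_steal_time(), which expects cputime_t, so on nsecs-granular cputime configs (full dynticks) the accounted steal time was roughly a factor of TICK_NSEC too small and looked like zero in /proc/stat.

/*
 * Simplified illustration (not kernel code) of the unit mismatch:
 * ticks were passed where a nsecs-based cputime_t was expected.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define HZ		1000ULL			/* assumed tick rate */
#define TICK_NSEC	(NSEC_PER_SEC / HZ)

int main(void)
{
	uint64_t steal_ns = 5 * NSEC_PER_SEC;	/* 5 s of steal time */

	/* Old path: nsecs -> ticks, and the tick count gets accounted. */
	uint64_t ticks = steal_ns / TICK_NSEC;	/* 5000 */
	/* On a nsecs-granular cputime config this reads as 5000 ns ~ 0 s. */
	printf("old accounting: %llu ns\n", (unsigned long long)ticks);

	/* New path: keep nanosecond resolution, convert nsecs -> cputime. */
	printf("new accounting: %llu ns\n", (unsigned long long)steal_ns);
	return 0;
}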
commit a21e40877a
@@ -1,6 +1,7 @@
generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
generic-y += hash.h
generic-y += mcs_spinlock.h
@@ -1,6 +0,0 @@
#ifndef __ALPHA_CPUTIME_H
#define __ALPHA_CPUTIME_H

#include <asm-generic/cputime.h>

#endif /* __ALPHA_CPUTIME_H */
@@ -5,6 +5,7 @@ header-y += arch-v32/
generic-y += barrier.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
generic-y += hash.h
generic-y += kvm_para.h
@@ -1,6 +0,0 @@
#ifndef __CRIS_CPUTIME_H
#define __CRIS_CPUTIME_H

#include <asm-generic/cputime.h>

#endif /* __CRIS_CPUTIME_H */
@@ -1,5 +1,6 @@
generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
generic-y += hash.h
generic-y += mcs_spinlock.h
@@ -1,6 +0,0 @@
#ifndef _ASM_CPUTIME_H
#define _ASM_CPUTIME_H

#include <asm-generic/cputime.h>

#endif /* _ASM_CPUTIME_H */
@@ -1,5 +1,6 @@
generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
generic-y += hash.h
generic-y += mcs_spinlock.h
@@ -1,6 +0,0 @@
#ifndef __M32R_CPUTIME_H
#define __M32R_CPUTIME_H

#include <asm-generic/cputime.h>

#endif /* __M32R_CPUTIME_H */
@@ -1,6 +1,7 @@
generic-y += barrier.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
generic-y += hash.h
generic-y += mcs_spinlock.h
@@ -1 +0,0 @@
#include <asm-generic/cputime.h>
@@ -1,6 +1,7 @@
generic-y += barrier.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
generic-y += hash.h
generic-y += mcs_spinlock.h
@@ -1 +0,0 @@
#include <asm-generic/cputime.h>
@@ -4,6 +4,7 @@ header-y +=
generic-y += barrier.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += hash.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
@@ -1,6 +0,0 @@
#ifndef _ASM_SCORE_CPUTIME_H
#define _ASM_SCORE_CPUTIME_H

#include <asm-generic/cputime.h>

#endif /* _ASM_SCORE_CPUTIME_H */
@@ -5,4 +5,5 @@ genhdr-y += unistd_64.h
genhdr-y += unistd_x32.h

generic-y += clkdev.h
generic-y += cputime.h
generic-y += mcs_spinlock.h
@@ -1 +0,0 @@
#include <asm-generic/cputime.h>
@@ -13,7 +13,7 @@
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/cputime.h>
#include <linux/cputime.h>

static spinlock_t cpufreq_stats_lock;
@@ -28,7 +28,7 @@
#include <asm/chpid.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include <asm/cputime.h>
#include <linux/cputime.h>
#include <asm/fcx.h>
#include <asm/nmi.h>
#include <asm/crw.h>
@@ -9,7 +9,7 @@
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/irqnr.h>
#include <asm/cputime.h>
#include <linux/cputime.h>
#include <linux/tick.h>

#ifndef arch_irq_stat_cpu
@@ -5,7 +5,7 @@
#include <linux/seq_file.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <asm/cputime.h>
#include <linux/cputime.h>

static int uptime_proc_show(struct seq_file *m, void *v)
{
@@ -15,8 +15,10 @@ typedef u64 __nocast cputime64_t;
/*
 * Convert nanoseconds to cputime
 * Convert nanoseconds <-> cputime
 */
#define cputime_to_nsecs(__ct) \
	jiffies_to_nsecs(cputime_to_jiffies(__ct))
#define nsecs_to_cputime64(__nsec) \
	jiffies64_to_cputime64(nsecs_to_jiffies64(__nsec))
#define nsecs_to_cputime(__nsec) \
@@ -44,7 +44,10 @@ typedef u64 __nocast cputime64_t;
/*
 * Convert cputime <-> nanoseconds
 */
#define nsecs_to_cputime(__nsecs) ((__force u64)(__nsecs))
#define cputime_to_nsecs(__ct) \
	(__force u64)(__ct)
#define nsecs_to_cputime(__nsecs) \
	(__force cputime_t)(__nsecs)

/*
@@ -0,0 +1,16 @@
#ifndef __LINUX_CPUTIME_H
#define __LINUX_CPUTIME_H

#include <asm/cputime.h>

#ifndef cputime_to_nsecs
# define cputime_to_nsecs(__ct) \
	(cputime_to_usecs(__ct) * NSEC_PER_USEC)
#endif

#ifndef nsecs_to_cputime
# define nsecs_to_cputime(__nsecs) \
	usecs_to_cputime((__nsecs) / NSEC_PER_USEC)
#endif

#endif /* __LINUX_CPUTIME_H */
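A rough userspace illustration of the fallback conversions in the new <linux/cputime.h> above (the microsecond-based stand-ins for cputime_t are an assumption made for the example): the generic defaults round-trip through microseconds, so sub-microsecond detail is truncated.

/*
 * Illustrative only: an arch whose cputime_t is expressed in usecs and
 * that does not override the generic nsecs <-> cputime conversions.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

/* Stand-ins for usecs-based arch helpers (assumed for this sketch). */
typedef uint64_t cputime_t;
#define cputime_to_usecs(ct)	(ct)
#define usecs_to_cputime(us)	(us)

/* The generic fallbacks, as added in include/linux/cputime.h above. */
#define cputime_to_nsecs(ct)	(cputime_to_usecs(ct) * NSEC_PER_USEC)
#define nsecs_to_cputime(ns)	usecs_to_cputime((ns) / NSEC_PER_USEC)

int main(void)
{
	uint64_t ns = 1234567;			/* 1.234567 ms */
	cputime_t ct = nsecs_to_cputime(ns);	/* 1234 us */

	printf("%llu ns -> %llu ns after round trip\n",
	       (unsigned long long)ns,
	       (unsigned long long)cputime_to_nsecs(ct));	/* 1234000 */
	return 0;
}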
@@ -9,7 +9,7 @@
#include <linux/sched.h>
#include <linux/vtime.h>
#include <asm/irq.h>
#include <asm/cputime.h>
#include <linux/cputime.h>

/*
 * 'kernel_stat.h' contains the definitions needed for doing
@@ -29,7 +29,7 @@ struct sched_param {
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>
#include <linux/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
@@ -823,19 +823,13 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		u64 st;

		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		st = steal_ticks(steal);
		steal = st * TICK_NSEC;

		rq->prev_steal_time_rq += steal;

		delta -= steal;
	}
#endif
@@ -258,16 +258,22 @@ static __always_inline bool steal_account_process_tick(void)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal, st = 0;
		u64 steal;
		cputime_t steal_ct;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;

		st = steal_ticks(steal);
		this_rq()->prev_steal_time += st * TICK_NSEC;
		/*
		 * cputime_t may be less precise than nsecs (eg: if it's
		 * based on jiffies). Lets cast the result to cputime
		 * granularity and account the rest on the next rounds.
		 */
		steal_ct = nsecs_to_cputime(steal);
		this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);

		account_steal_time(st);
		return st;
		account_steal_time(steal_ct);
		return steal_ct;
	}
#endif
	return false;
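A hedged sketch (userspace, illustrative HZ and helper names) of the remainder-carrying behaviour the new steal accounting relies on: nsecs_to_cputime() may round down to cputime granularity (whole ticks when cputime is jiffies based), and advancing prev_steal_time only by cputime_to_nsecs() of that rounded value leaves the sub-tick remainder to be accounted on a later round rather than dropping it.

/*
 * Illustrative constants and names only; this mimics the accounting
 * loop, it is not the kernel implementation.
 */
#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 4000000ULL		/* assumed: HZ=250 -> 4 ms ticks */

static uint64_t prev_steal_time;	/* nsecs already accounted */

static uint64_t account_one_round(uint64_t steal_clock_ns)
{
	uint64_t steal = steal_clock_ns - prev_steal_time;	/* unaccounted nsecs */
	uint64_t steal_ticks = steal / TICK_NSEC;		/* ~nsecs_to_cputime() */

	/* Only consume what was actually accounted; keep the remainder. */
	prev_steal_time += steal_ticks * TICK_NSEC;		/* ~cputime_to_nsecs() */
	return steal_ticks;
}

int main(void)
{
	/* 6 ms of steal: 1 tick accounted, 2 ms carried over. */
	printf("round 1: %llu tick(s)\n",
	       (unsigned long long)account_one_round(6000000ULL));
	/* 2 ms more steal: carried 2 ms + new 2 ms = 1 more tick. */
	printf("round 2: %llu tick(s)\n",
	       (unsigned long long)account_one_round(8000000ULL));
	return 0;
}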
@@ -1216,16 +1216,6 @@ extern void update_idle_cpu_load(struct rq *this_rq);

extern void init_task_runnable_average(struct task_struct *p);

#ifdef CONFIG_PARAVIRT
static inline u64 steal_ticks(u64 steal)
{
	if (unlikely(steal > NSEC_PER_SEC))
		return div_u64(steal, TICK_NSEC);

	return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
}
#endif

static inline void inc_nr_running(struct rq *rq)
{
	rq->nr_running++;