2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Common time routines among all ppc machines.
|
|
|
|
*
|
|
|
|
* Written by Cort Dougan (cort@cs.nmt.edu) to merge
|
|
|
|
* Paul Mackerras' version and mine for PReP and Pmac.
|
|
|
|
* MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
|
|
|
|
* Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
|
|
|
|
*
|
|
|
|
* First round of bugfixes by Gabriel Paubert (paubert@iram.es)
|
|
|
|
* to make clock more stable (2.4.0-test5). The only thing
|
|
|
|
* that this code assumes is that the timebases have been synchronized
|
|
|
|
* by firmware on SMP and are never stopped (never do sleep
|
|
|
|
* on SMP then, nap and doze are OK).
|
|
|
|
*
|
|
|
|
* Speeded up do_gettimeofday by getting rid of references to
|
|
|
|
* xtime (which required locks for consistency). (mikejc@us.ibm.com)
|
|
|
|
*
|
|
|
|
* TODO (not necessarily in this file):
|
|
|
|
* - improve precision and reproducibility of timebase frequency
|
2012-03-16 02:18:00 +08:00
|
|
|
* measurement at boot time.
|
2005-04-17 06:20:36 +08:00
|
|
|
* - for astronomical applications: add a new function to get
|
|
|
|
* non ambiguous timestamps even around leap seconds. This needs
|
|
|
|
* a new timestamp format and a good name.
|
|
|
|
*
|
|
|
|
* 1997-09-10 Updated NTP code according to technical memorandum Jan '96
|
|
|
|
* "A Kernel Model for Precision Timekeeping" by Dave Mills
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation; either version
|
|
|
|
* 2 of the License, or (at your option) any later version.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/errno.h>
|
2011-07-23 06:24:23 +08:00
|
|
|
#include <linux/export.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/sched.h>
|
2017-02-01 23:36:40 +08:00
|
|
|
#include <linux/sched/clock.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/param.h>
|
|
|
|
#include <linux/string.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/timex.h>
|
|
|
|
#include <linux/kernel_stat.h>
|
|
|
|
#include <linux/time.h>
|
2014-02-26 08:09:06 +08:00
|
|
|
#include <linux/clockchips.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/profile.h>
|
|
|
|
#include <linux/cpu.h>
|
|
|
|
#include <linux/security.h>
|
2005-10-20 07:23:26 +08:00
|
|
|
#include <linux/percpu.h>
|
|
|
|
#include <linux/rtc.h>
|
2006-02-20 07:38:56 +08:00
|
|
|
#include <linux/jiffies.h>
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 07:06:59 +08:00
|
|
|
#include <linux/posix-timers.h>
|
IRQ: Maintain regs pointer globally rather than passing to IRQ handlers
Maintain a per-CPU global "struct pt_regs *" variable which can be used instead
of passing regs around manually through all ~1800 interrupt handlers in the
Linux kernel.
The regs pointer is used in few places, but it potentially costs both stack
space and code to pass it around. On the FRV arch, removing the regs parameter
from all the genirq function results in a 20% speed up of the IRQ exit path
(ie: from leaving timer_interrupt() to leaving do_IRQ()).
Where appropriate, an arch may override the generic storage facility and do
something different with the variable. On FRV, for instance, the address is
maintained in GR28 at all times inside the kernel as part of general exception
handling.
Having looked over the code, it appears that the parameter may be handed down
through up to twenty or so layers of functions. Consider a USB character
device attached to a USB hub, attached to a USB controller that posts its
interrupts through a cascaded auxiliary interrupt controller. A character
device driver may want to pass regs to the sysrq handler through the input
layer which adds another few layers of parameter passing.
I've build this code with allyesconfig for x86_64 and i386. I've runtested the
main part of the code on FRV and i386, though I can't test most of the drivers.
I've also done partial conversion for powerpc and MIPS - these at least compile
with minimal configurations.
This will affect all archs. Mostly the changes should be relatively easy.
Take do_IRQ(), store the regs pointer at the beginning, saving the old one:
struct pt_regs *old_regs = set_irq_regs(regs);
And put the old one back at the end:
set_irq_regs(old_regs);
Don't pass regs through to generic_handle_irq() or __do_IRQ().
In timer_interrupt(), this sort of change will be necessary:
- update_process_times(user_mode(regs));
- profile_tick(CPU_PROFILING, regs);
+ update_process_times(user_mode(get_irq_regs()));
+ profile_tick(CPU_PROFILING);
I'd like to move update_process_times()'s use of get_irq_regs() into itself,
except that i386, alone of the archs, uses something other than user_mode().
Some notes on the interrupt handling in the drivers:
(*) input_dev() is now gone entirely. The regs pointer is no longer stored in
the input_dev struct.
(*) finish_unlinks() in drivers/usb/host/ohci-q.c needs checking. It does
something different depending on whether it's been supplied with a regs
pointer or not.
(*) Various IRQ handler function pointers have been moved to type
irq_handler_t.
Signed-Off-By: David Howells <dhowells@redhat.com>
(cherry picked from 1b16e7ac850969f38b375e511e3fa2f474a33867 commit)
2006-10-05 21:55:46 +08:00
|
|
|
#include <linux/irq.h>
|
2009-06-10 05:12:00 +08:00
|
|
|
#include <linux/delay.h>
|
2010-10-14 14:01:34 +08:00
|
|
|
#include <linux/irq_work.h>
|
2014-12-03 16:53:52 +08:00
|
|
|
#include <linux/clk-provider.h>
|
2016-01-06 08:45:51 +08:00
|
|
|
#include <linux/suspend.h>
|
2016-05-31 02:58:00 +08:00
|
|
|
#include <linux/rtc.h>
|
2017-02-05 18:48:36 +08:00
|
|
|
#include <linux/sched/cputime.h>
|
2017-06-06 21:08:32 +08:00
|
|
|
#include <linux/processor.h>
|
2009-10-27 02:49:14 +08:00
|
|
|
#include <asm/trace.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
#include <asm/io.h>
|
|
|
|
#include <asm/nvram.h>
|
|
|
|
#include <asm/cache.h>
|
|
|
|
#include <asm/machdep.h>
|
2016-12-25 03:46:01 +08:00
|
|
|
#include <linux/uaccess.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <asm/time.h>
|
|
|
|
#include <asm/prom.h>
|
2005-10-20 07:23:26 +08:00
|
|
|
#include <asm/irq.h>
|
|
|
|
#include <asm/div64.h>
|
2005-11-07 10:18:13 +08:00
|
|
|
#include <asm/smp.h>
|
2005-11-11 18:15:21 +08:00
|
|
|
#include <asm/vdso_datapage.h>
|
2005-08-03 12:35:25 +08:00
|
|
|
#include <asm/firmware.h>
|
2016-09-06 13:32:43 +08:00
|
|
|
#include <asm/asm-prototypes.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-09-22 05:35:52 +08:00
|
|
|
/* powerpc clocksource/clockevent code */
|
|
|
|
|
2007-09-21 11:26:03 +08:00
|
|
|
#include <linux/clockchips.h>
|
2012-09-05 03:27:48 +08:00
|
|
|
#include <linux/timekeeper_internal.h>
|
2007-09-22 05:35:52 +08:00
|
|
|
|
2016-12-22 03:32:01 +08:00
|
|
|
static u64 rtc_read(struct clocksource *);
/*
 * Clocksource backed by the RTC register (read via rtc_read()).
 * Rated 400 so it is strongly preferred once registered; marked
 * CONTINUOUS so the timekeeping core may use it as its time base.
 * Full 64-bit counter mask.
 */
static struct clocksource clocksource_rtc = {
	.name = "rtc",
	.rating = 400,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask = CLOCKSOURCE_MASK(64),
	.read = rtc_read,
};
|
|
|
|
|
2016-12-22 03:32:01 +08:00
|
|
|
static u64 timebase_read(struct clocksource *);
/*
 * Clocksource backed by the CPU timebase (read via timebase_read()).
 * Same rating/flags as clocksource_rtc above; which of the two gets
 * registered is decided elsewhere at boot (not visible in this chunk).
 */
static struct clocksource clocksource_timebase = {
	.name = "timebase",
	.rating = 400,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask = CLOCKSOURCE_MASK(64),
	.read = timebase_read,
};
|
|
|
|
|
2016-07-01 14:20:39 +08:00
|
|
|
/*
 * Largest interval (in timebase ticks) that can be programmed into the
 * decrementer.  0x7FFFFFFF corresponds to a 32-bit decrementer that
 * interrupts when the sign bit would be set; NOTE(review): presumably
 * raised at boot on CPUs with a wider decrementer — confirm at the
 * site that writes decrementer_max.
 */
#define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
u64 decrementer_max = DECREMENTER_DEFAULT_MAX;
|
2007-09-21 11:26:03 +08:00
|
|
|
|
|
|
|
static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static int decrementer_shutdown(struct clock_event_device *evt);

/*
 * Template clock event device driven by the decrementer.  One-shot
 * only (no periodic mode callback is provided).  CLOCK_EVT_FEAT_C3STOP
 * tells the core the event device may stop in deep idle states.
 * Per-CPU copies are kept in the "decrementers" per-CPU variable below.
 */
struct clock_event_device decrementer_clockevent = {
	.name = "decrementer",
	.rating = 200,
	.irq = 0,
	.set_next_event = decrementer_set_next_event,
	.set_state_shutdown = decrementer_shutdown,
	.tick_resume = decrementer_shutdown,	/* resume == re-arm from shutdown */
	.features = CLOCK_EVT_FEAT_ONESHOT |
		    CLOCK_EVT_FEAT_C3STOP,
};
EXPORT_SYMBOL(decrementer_clockevent);
|
2007-09-21 11:26:03 +08:00
|
|
|
|
powerpc/time: Optimise decrementer_check_overflow
decrementer_check_overflow is called from arch_local_irq_restore so
we want to make it as light weight as possible. As such, turn
decrementer_check_overflow into an inline function.
To avoid a circular mess of includes, separate out the two components
of struct decrementer_clock and keep the struct clock_event_device
part local to time.c.
The fast path improves from:
arch_local_irq_restore
0: mflr r0
4: std r0,16(r1)
8: stdu r1,-112(r1)
c: stb r3,578(r13)
10: cmpdi cr7,r3,0
14: beq- cr7,24 <.arch_local_irq_restore+0x24>
...
24: addi r1,r1,112
28: ld r0,16(r1)
2c: mtlr r0
30: blr
to:
arch_local_irq_restore
0: std r30,-16(r1)
4: ld r30,0(r2)
8: stb r3,578(r13)
c: cmpdi cr7,r3,0
10: beq- cr7,6c <.arch_local_irq_restore+0x6c>
...
6c: ld r30,-16(r1)
70: blr
Unfortunately we still setup a local TOC (due to -mminimal-toc). Yet
another sign we should be moving to -mcmodel=medium.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2011-11-24 04:07:22 +08:00
|
|
|
/* Per-CPU: timebase value at which the next decrementer event is due. */
DEFINE_PER_CPU(u64, decrementers_next_tb);
/* Per-CPU clock_event_device instances, one decrementer per CPU. */
static DEFINE_PER_CPU(struct clock_event_device, decrementers);
|
2007-09-21 11:26:03 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* An "xsec" is 1/2^20 of a second. */
#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
/*
 * Scale an xsec count by max/XSEC_PER_SEC.  Both arguments are
 * parenthesized so arbitrary expressions may be passed safely
 * (previously "max" was expanded unparenthesized).
 */
#define SCALE_XSEC(xsec, max) (((xsec) * (max)) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max) mulhwu((xsec) << 12, (max))
#endif
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Timebase tick rates.  NOTE(review): these are initialized during
 * boot-time setup elsewhere in the file — not visible in this chunk.
 */
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100;	/* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
|
2006-02-20 07:38:56 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Serializes access to the real-time clock hardware; exported for drivers. */
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-07-04 12:04:31 +08:00
|
|
|
/*
 * Scale/shift pair for converting timebase ticks to nanoseconds
 * (presumably ns = (tb * tb_to_ns_scale) >> tb_to_ns_shift — confirm
 * against the sched_clock()/conversion code that uses them).
 * __read_mostly: written once at boot, then only read on hot paths.
 */
static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
/* Timebase value captured at boot — NOTE(review): confirm where set. */
static u64 boot_tb __read_mostly;

/* System timezone, defined in kernel/time core. */
extern struct timezone sys_tz;
/* Offset (seconds) applied when reading/writing the hardware clock. */
static long timezone_offset;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-06-23 07:43:07 +08:00
|
|
|
/*
 * Processor and timebase frequencies in Hz.  NOTE(review): set during
 * boot elsewhere — the initialization is not visible in this chunk.
 */
unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);
|
2005-10-23 15:14:56 +08:00
|
|
|
|
2012-07-25 13:56:04 +08:00
|
|
|
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 07:06:59 +08:00
|
|
|
/*
|
2017-01-31 11:09:48 +08:00
|
|
|
* Factor for converting from cputime_t (timebase ticks) to
|
|
|
|
* microseconds. This is stored as 0.64 fixed-point binary fraction.
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 07:06:59 +08:00
|
|
|
*/
|
2011-12-09 19:35:08 +08:00
|
|
|
/*
 * 0.64 fixed-point binary fraction for converting timebase ticks to
 * microseconds; computed from tb_ticks_per_sec by calc_cputime_factors().
 */
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);
|
2009-07-29 18:15:29 +08:00
|
|
|
|
2016-05-17 14:33:46 +08:00
|
|
|
#ifdef CONFIG_PPC_SPLPAR
/*
 * Optional hook invoked for each dispatch-trace-log entry consumed on
 * shared-processor LPARs; NULL when no consumer is registered.
 */
void (*dtl_consumer)(struct dtl_entry *, u64);
#endif

/*
 * Locate the per-task CPU-time accounting data: on 64-bit it lives in
 * the PACA (per-CPU), on 32-bit in the task's thread_info.
 */
#ifdef CONFIG_PPC64
#define get_accounting(tsk) (&get_paca()->accounting)
#else
#define get_accounting(tsk) (&task_thread_info(tsk)->accounting)
#endif
|
2010-08-31 09:59:53 +08:00
|
|
|
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 07:06:59 +08:00
|
|
|
static void calc_cputime_factors(void)
|
|
|
|
{
|
|
|
|
struct div_result res;
|
|
|
|
|
2011-12-09 19:35:08 +08:00
|
|
|
div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
|
|
|
|
__cputime_usec_factor = res.result_low;
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 07:06:59 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
* Read the SPURR on systems that have it, otherwise the PURR,
|
|
|
|
* or if that doesn't exist return the timebase value passed in.
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 07:06:59 +08:00
|
|
|
*/
|
2016-05-17 14:33:46 +08:00
|
|
|
static unsigned long read_spurr(unsigned long tb)
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 07:06:59 +08:00
|
|
|
{
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
if (cpu_has_feature(CPU_FTR_SPURR))
|
|
|
|
return mfspr(SPRN_SPURR);
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 07:06:59 +08:00
|
|
|
if (cpu_has_feature(CPU_FTR_PURR))
|
|
|
|
return mfspr(SPRN_PURR);
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
return tb;
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 07:06:59 +08:00
|
|
|
}
|
|
|
|
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
#ifdef CONFIG_PPC_SPLPAR
|
|
|
|
|
2007-10-18 18:06:37 +08:00
|
|
|
/*
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
* Scan the dispatch trace log and count up the stolen time.
|
|
|
|
* Should be called with interrupts disabled.
|
2007-10-18 18:06:37 +08:00
|
|
|
*/
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
static u64 scan_dispatch_log(u64 stop_tb)
|
2007-10-18 18:06:37 +08:00
|
|
|
{
|
2010-08-31 09:59:53 +08:00
|
|
|
u64 i = local_paca->dtl_ridx;
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
struct dtl_entry *dtl = local_paca->dtl_curr;
|
|
|
|
struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
|
|
|
|
struct lppaca *vpa = local_paca->lppaca_ptr;
|
|
|
|
u64 tb_delta;
|
|
|
|
u64 stolen = 0;
|
|
|
|
u64 dtb;
|
|
|
|
|
2011-04-08 05:44:21 +08:00
|
|
|
if (!dtl)
|
|
|
|
return 0;
|
|
|
|
|
2013-08-07 00:01:46 +08:00
|
|
|
if (i == be64_to_cpu(vpa->dtl_idx))
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
return 0;
|
2013-08-07 00:01:46 +08:00
|
|
|
while (i < be64_to_cpu(vpa->dtl_idx)) {
|
|
|
|
dtb = be64_to_cpu(dtl->timebase);
|
|
|
|
tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
|
|
|
|
be32_to_cpu(dtl->ready_to_enqueue_time);
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
barrier();
|
2013-08-07 00:01:46 +08:00
|
|
|
if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
/* buffer has overflowed */
|
2013-08-07 00:01:46 +08:00
|
|
|
i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (dtb > stop_tb)
|
|
|
|
break;
|
2013-11-17 08:39:05 +08:00
|
|
|
if (dtl_consumer)
|
|
|
|
dtl_consumer(dtl, i);
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
stolen += tb_delta;
|
|
|
|
++i;
|
|
|
|
++dtl;
|
|
|
|
if (dtl == dtl_end)
|
|
|
|
dtl = local_paca->dispatch_log;
|
|
|
|
}
|
|
|
|
local_paca->dtl_ridx = i;
|
|
|
|
local_paca->dtl_curr = dtl;
|
|
|
|
return stolen;
|
2007-10-18 18:06:37 +08:00
|
|
|
}
|
|
|
|
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
/*
|
|
|
|
* Accumulate stolen time by scanning the dispatch trace log.
|
|
|
|
* Called on entry from user mode.
|
|
|
|
*/
|
|
|
|
void accumulate_stolen_time(void)
|
|
|
|
{
|
|
|
|
u64 sst, ust;
|
2017-12-20 11:55:50 +08:00
|
|
|
unsigned long save_irq_soft_mask = irq_soft_mask_return();
|
2016-05-17 14:33:46 +08:00
|
|
|
struct cpu_accounting_data *acct = &local_paca->accounting;
|
2011-01-03 11:49:25 +08:00
|
|
|
|
|
|
|
/* We are called early in the exception entry, before
|
|
|
|
* soft/hard_enabled are sync'ed to the expected state
|
|
|
|
* for the exception. We are hard disabled but the PACA
|
|
|
|
* needs to reflect that so various debug stuff doesn't
|
|
|
|
* complain
|
|
|
|
*/
|
2017-12-20 11:55:50 +08:00
|
|
|
irq_soft_mask_set(IRQS_DISABLED);
|
2011-01-03 11:49:25 +08:00
|
|
|
|
2016-05-17 14:33:46 +08:00
|
|
|
sst = scan_dispatch_log(acct->starttime_user);
|
|
|
|
ust = scan_dispatch_log(acct->starttime);
|
2017-01-06 01:11:45 +08:00
|
|
|
acct->stime -= sst;
|
|
|
|
acct->utime -= ust;
|
2017-01-06 01:11:46 +08:00
|
|
|
acct->steal_time += ust + sst;
|
2011-01-03 11:49:25 +08:00
|
|
|
|
2017-12-20 11:55:50 +08:00
|
|
|
irq_soft_mask_set(save_irq_soft_mask);
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline u64 calculate_stolen_time(u64 stop_tb)
|
|
|
|
{
|
2018-04-02 15:33:37 +08:00
|
|
|
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
|
|
|
|
return 0;
|
|
|
|
|
2017-01-06 01:11:47 +08:00
|
|
|
if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
|
|
|
|
return scan_dispatch_log(stop_tb);
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
|
2017-01-06 01:11:47 +08:00
|
|
|
return 0;
|
2007-10-18 18:06:37 +08:00
|
|
|
}
|
|
|
|
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
#else /* CONFIG_PPC_SPLPAR */
|
|
|
|
static inline u64 calculate_stolen_time(u64 stop_tb)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* CONFIG_PPC_SPLPAR */
|
|
|
|
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 07:06:59 +08:00
|
|
|
/*
|
|
|
|
* Account time for a transition between system, hard irq
|
|
|
|
* or soft irq state.
|
|
|
|
*/
|
2016-05-17 14:33:46 +08:00
|
|
|
static unsigned long vtime_delta(struct task_struct *tsk,
|
2017-01-06 01:11:47 +08:00
|
|
|
unsigned long *stime_scaled,
|
|
|
|
unsigned long *steal_time)
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 07:06:59 +08:00
|
|
|
{
|
2016-05-17 14:33:46 +08:00
|
|
|
unsigned long now, nowscaled, deltascaled;
|
2017-01-06 01:11:47 +08:00
|
|
|
unsigned long stime;
|
|
|
|
unsigned long utime, utime_scaled;
|
2016-05-17 14:33:46 +08:00
|
|
|
struct cpu_accounting_data *acct = get_accounting(tsk);
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 07:06:59 +08:00
|
|
|
|
2012-11-20 00:00:24 +08:00
|
|
|
WARN_ON_ONCE(!irqs_disabled());
|
|
|
|
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
now = mftb();
|
2007-10-18 18:06:37 +08:00
|
|
|
nowscaled = read_spurr(now);
|
2017-01-06 01:11:47 +08:00
|
|
|
stime = now - acct->starttime;
|
2016-05-17 14:33:46 +08:00
|
|
|
acct->starttime = now;
|
|
|
|
deltascaled = nowscaled - acct->startspurr;
|
|
|
|
acct->startspurr = nowscaled;
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
|
2017-01-06 01:11:47 +08:00
|
|
|
*steal_time = calculate_stolen_time(now);
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
|
2017-01-06 01:11:47 +08:00
|
|
|
utime = acct->utime - acct->utime_sspurr;
|
2017-01-06 01:11:45 +08:00
|
|
|
acct->utime_sspurr = acct->utime;
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Because we don't read the SPURR on every kernel entry/exit,
|
|
|
|
* deltascaled includes both user and system SPURR ticks.
|
|
|
|
* Apportion these ticks to system SPURR ticks and user
|
|
|
|
* SPURR ticks in the same ratio as the system time (delta)
|
|
|
|
* and user time (udelta) values obtained from the timebase
|
|
|
|
* over the same interval. The system ticks get accounted here;
|
|
|
|
* the user ticks get saved up in paca->user_time_scaled to be
|
|
|
|
* used by account_process_tick.
|
|
|
|
*/
|
2017-01-06 01:11:47 +08:00
|
|
|
*stime_scaled = stime;
|
|
|
|
utime_scaled = utime;
|
|
|
|
if (deltascaled != stime + utime) {
|
|
|
|
if (utime) {
|
|
|
|
*stime_scaled = deltascaled * stime / (stime + utime);
|
|
|
|
utime_scaled = deltascaled - *stime_scaled;
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
} else {
|
2017-01-06 01:11:47 +08:00
|
|
|
*stime_scaled = deltascaled;
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
}
|
|
|
|
}
|
2017-01-06 01:11:47 +08:00
|
|
|
acct->utime_scaled += utime_scaled;
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
|
2017-01-06 01:11:47 +08:00
|
|
|
return stime;
|
2012-09-08 22:14:02 +08:00
|
|
|
}
|
|
|
|
|
2012-11-14 01:21:22 +08:00
|
|
|
/*
 * Account elapsed kernel (system) time for @tsk since the last accounting
 * point, classifying it into hardirq, softirq, guest or plain system time
 * in the per-CPU/per-task accounting data.
 *
 * vtime_delta() returns the raw timebase delta and fills in the scaled
 * (SPURR-apportioned) delta and the stolen time over the same interval.
 * Stolen time is subtracted from the system time (clamped so it cannot
 * underflow) and accumulated separately.
 */
void vtime_account_system(struct task_struct *tsk)
{
	unsigned long stime, stime_scaled, steal_time;
	struct cpu_accounting_data *acct = get_accounting(tsk);

	stime = vtime_delta(tsk, &stime_scaled, &steal_time);

	/* Clamp with min() so stime never wraps below zero. */
	stime -= min(stime, steal_time);
	acct->steal_time += steal_time;

	if ((tsk->flags & PF_VCPU) && !irq_count()) {
		/*
		 * Task is running a guest vCPU and we are not in an
		 * interrupt: account the time as guest time. The scaled
		 * ticks go to utime_scaled — presumably because guest
		 * time is reported as user time; TODO confirm against
		 * the generic vtime accounting conventions.
		 */
		acct->gtime += stime;
		acct->utime_scaled += stime_scaled;
	} else {
		/* Classify by context: hardirq, then softirq, else system. */
		if (hardirq_count())
			acct->hardirq_time += stime;
		else if (in_serving_softirq())
			acct->softirq_time += stime;
		else
			acct->stime += stime;

		/* Scaled ticks are lumped together regardless of context. */
		acct->stime_scaled += stime_scaled;
	}
}
EXPORT_SYMBOL_GPL(vtime_account_system);
|
2012-09-08 22:14:02 +08:00
|
|
|
|
2012-11-14 01:21:22 +08:00
|
|
|
/*
 * Account elapsed time for the idle task @tsk.
 *
 * Unlike vtime_account_system(), stolen time is not separated out here:
 * time ceded to the hypervisor while idle is folded into idle_time,
 * since distinguishing "idle" from "stolen while idle" is not useful.
 * The scaled delta is computed by vtime_delta() but deliberately unused.
 */
void vtime_account_idle(struct task_struct *tsk)
{
	unsigned long stime, stime_scaled, steal_time;
	struct cpu_accounting_data *acct = get_accounting(tsk);

	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
	/* Idle absorbs both genuine idle time and time stolen while idle. */
	acct->idle_time += stime + steal_time;
}
|
|
|
|
|
|
|
|
/*
|
2017-01-06 01:11:50 +08:00
|
|
|
* Account the whole cputime accumulated in the paca
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 07:06:59 +08:00
|
|
|
* Must be called with interrupts disabled.
|
2012-11-14 06:51:06 +08:00
|
|
|
* Assumes that vtime_account_system/idle() has been called
|
|
|
|
* recently (i.e. since the last entry from usermode) so that
|
powerpc: Account time using timebase rather than PURR
Currently, when CONFIG_VIRT_CPU_ACCOUNTING is enabled, we use the
PURR register for measuring the user and system time used by
processes, as well as other related times such as hardirq and
softirq times. This turns out to be quite confusing for users
because it means that a program will often be measured as taking
less time when run on a multi-threaded processor (SMT2 or SMT4 mode)
than it does when run on a single-threaded processor (ST mode), even
though the program takes longer to finish. The discrepancy is
accounted for as stolen time, which is also confusing, particularly
when there are no other partitions running.
This changes the accounting to use the timebase instead, meaning that
the reported user and system times are the actual number of real-time
seconds that the program was executing on the processor thread,
regardless of which SMT mode the processor is in. Thus a program will
generally show greater user and system times when run on a
multi-threaded processor than on a single-threaded processor.
On pSeries systems on POWER5 or later processors, we measure the
stolen time (time when this partition wasn't running) using the
hypervisor dispatch trace log. We check for new entries in the
log on every entry from user mode and on every transition from
kernel process context to soft or hard IRQ context (i.e. when
account_system_vtime() gets called). So that we can correctly
distinguish time stolen from user time and time stolen from system
time, without having to check the log on every exit to user mode,
we store separate timestamps for exit to user mode and entry from
user mode.
On systems that have a SPURR (POWER6 and POWER7), we read the SPURR
in account_system_vtime() (as before), and then apportion the SPURR
ticks since the last time we read it between scaled user time and
scaled system time according to the relative proportions of user
time and system time over the same interval. This avoids having to
read the SPURR on every kernel entry and exit. On systems that have
PURR but not SPURR (i.e., POWER5), we do the same using the PURR
rather than the SPURR.
This disables the DTL user interface in /sys/debug/kernel/powerpc/dtl
for now since it conflicts with the use of the dispatch trace log
by the time accounting code.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-08-27 03:56:43 +08:00
|
|
|
* get_paca()->user_time_scaled is up to date.
|
powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines on
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-24 07:06:59 +08:00
|
|
|
*/
|
2017-01-06 01:11:50 +08:00
|
|
|
/*
 * Flush the per-cpu accounting accumulators for @tsk into the generic
 * cputime accounting core.
 *
 * Entry/exit and context-switch code accumulates time deltas (in cputime
 * units) into the cpu_accounting_data structure; this hands each non-zero
 * accumulator to the appropriate account_*() interface (converted to
 * nanoseconds) and then zeroes all accumulators so the next accounting
 * interval starts fresh.
 */
void vtime_flush(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);

	/* User, guest, steal and idle time go through the generic helpers. */
	if (acct->utime)
		account_user_time(tsk, cputime_to_nsecs(acct->utime));

	if (acct->utime_scaled)
		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);

	if (acct->gtime)
		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));

	if (acct->steal_time)
		account_steal_time(cputime_to_nsecs(acct->steal_time));

	if (acct->idle_time)
		account_idle_time(cputime_to_nsecs(acct->idle_time));

	/*
	 * System time is split into three buckets (process context, hardirq,
	 * softirq) via the CPUTIME_* index so /proc/stat can report them
	 * separately.
	 */
	if (acct->stime)
		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
					  CPUTIME_SYSTEM);
	if (acct->stime_scaled)
		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);

	if (acct->hardirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
					  CPUTIME_IRQ);
	if (acct->softirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
					  CPUTIME_SOFTIRQ);

	/* Everything has been handed off; reset all accumulators. */
	acct->utime = 0;
	acct->utime_scaled = 0;
	acct->utime_sspurr = 0;
	acct->gtime = 0;
	acct->steal_time = 0;
	acct->idle_time = 0;
	acct->stime = 0;
	acct->stime_scaled = 0;
	acct->hardirq_time = 0;
	acct->softirq_time = 0;
}
|
|
|
|
|
2016-05-17 14:33:46 +08:00
|
|
|
#ifdef CONFIG_PPC32
|
|
|
|
/*
|
|
|
|
* Called from the context switch with interrupts disabled, to charge all
|
|
|
|
* accumulated times to the current process, and to prepare accounting on
|
|
|
|
* the next process.
|
|
|
|
*/
|
|
|
|
void arch_vtime_task_switch(struct task_struct *prev)
|
|
|
|
{
|
|
|
|
struct cpu_accounting_data *acct = get_accounting(current);
|
|
|
|
|
|
|
|
acct->starttime = get_accounting(prev)->starttime;
|
2017-01-06 01:11:41 +08:00
|
|
|
acct->startspurr = get_accounting(prev)->startspurr;
|
2016-05-17 14:33:46 +08:00
|
|
|
}
|
|
|
|
#endif /* CONFIG_PPC32 */
|
|
|
|
|
2012-07-25 13:56:04 +08:00
|
|
|
#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
 * With tick-based accounting there are no timebase-to-cputime conversion
 * factors to compute, so this is a no-op stub.
 */
#define calc_cputime_factors()
#endif
|
|
|
|
|
2005-11-18 10:44:17 +08:00
|
|
|
/*
 * Busy-wait for at least @loops ticks of the timebase (or, on machines
 * using the 601-style RTC, the RTCL register).  Uses spin_begin()/
 * spin_cpu_relax()/spin_end() so the busy loop yields appropriately
 * (e.g. lowers thread priority on SMT processors).
 */
void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;	/* NOTE(review): int vs unsigned long loops — assumes
			 * callers pass loop counts < INT_MAX on RTC machines */

	spin_begin();
	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
			spin_cpu_relax();
		} while (diff < loops);
	} else {
		/* Timebase is a free-running counter; unsigned subtraction
		 * handles wraparound naturally. */
		start = get_tbl();
		while (get_tbl() - start < loops)
			spin_cpu_relax();
	}
	spin_end();
}
EXPORT_SYMBOL(__delay);
|
|
|
|
|
|
|
|
/*
 * Busy-wait for at least @usecs microseconds, by converting to timebase
 * ticks and delegating to __delay().
 */
void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
unsigned long profile_pc(struct pt_regs *regs)
|
|
|
|
{
|
|
|
|
unsigned long pc = instruction_pointer(regs);
|
|
|
|
|
|
|
|
if (in_lock_functions(pc))
|
|
|
|
return regs->link;
|
|
|
|
|
|
|
|
return pc;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(profile_pc);
|
|
|
|
#endif
|
|
|
|
|
2010-10-14 14:01:34 +08:00
|
|
|
#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
/*
 * Read the irq_work_pending byte out of the current CPU's PACA.
 * r13 holds the PACA pointer on 64-bit, so a single lbz off r13 is a
 * preemption-safe per-cpu read with no address computation.
 */
static inline unsigned long test_irq_work_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
	: "=r" (x)
	: "i" (offsetof(struct paca_struct, irq_work_pending)));
	return x;
}

/* Set the PACA irq_work_pending byte to 1 (single stb off r13). */
static inline void set_irq_work_pending_flag(void)
{
	asm volatile("stb %0,%1(13)" : :
	"r" (1),
	"i" (offsetof(struct paca_struct, irq_work_pending)));
}

/* Clear the PACA irq_work_pending byte (single stb off r13). */
static inline void clear_irq_work_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
	"r" (0),
	"i" (offsetof(struct paca_struct, irq_work_pending)));
}
|
|
|
|
|
2018-05-05 01:19:25 +08:00
|
|
|
/*
 * Raise an irq_work interrupt on the local CPU: mark work pending in the
 * PACA and arrange for the decrementer to fire so the work gets run from
 * the timer interrupt path.
 */
void arch_irq_work_raise(void)
{
	preempt_disable();
	set_irq_work_pending_flag();
	/*
	 * Non-nmi code running with interrupts disabled will replay
	 * irq_happened before it re-enables interrupts, so set the
	 * decrementer there instead of causing a hardware exception
	 * which would immediately hit the masked interrupt handler
	 * and have the net effect of setting the decrementer in
	 * irq_happened.
	 *
	 * NMI interrupts can not check this when they return, so the
	 * decrementer hardware exception is raised, which will fire
	 * when interrupts are next enabled.
	 *
	 * BookE does not support this yet, it must audit all NMI
	 * interrupt handlers to ensure they call nmi_enter() so this
	 * check would be correct.
	 */
	if (IS_ENABLED(CONFIG_BOOKE) || !irqs_disabled() || in_nmi()) {
		set_dec(1);
	} else {
		hard_irq_disable();
		local_paca->irq_happened |= PACA_IRQ_DEC;
	}
	preempt_enable();
}
|
|
|
|
|
2010-04-14 04:46:04 +08:00
|
|
|
#else /* 32-bit */

/* 32-bit has no PACA; track pending irq_work in an ordinary per-cpu byte. */
DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
#define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)
|
2009-06-17 19:50:04 +08:00
|
|
|
|
2011-06-27 23:22:43 +08:00
|
|
|
/*
 * 32-bit variant: mark irq_work pending and force an imminent decrementer
 * interrupt so the work runs from the timer interrupt path.
 */
void arch_irq_work_raise(void)
{
	preempt_disable();
	set_irq_work_pending_flag();
	set_dec(1);
	preempt_enable();
}
|
|
|
|
|
2018-05-05 01:19:25 +08:00
|
|
|
#endif /* 32 vs 64 bit */

#else  /* CONFIG_IRQ_WORK */

/* No irq_work support configured: the timer path sees nothing pending. */
#define test_irq_work_pending()	0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */
|
2009-06-17 19:50:04 +08:00
|
|
|
|
2014-08-20 06:55:18 +08:00
|
|
|
/*
 * Core of the decrementer (timer) interrupt: run any pending irq_work,
 * then either invoke the clockevent handler (if the programmed expiry
 * *next_tb has been reached) or reprogram the decrementer for the
 * remaining time.
 */
static void __timer_interrupt(void)
{
	struct pt_regs *regs = get_irq_regs();
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
	u64 now;

	trace_timer_interrupt_entry(regs);

	/* irq_work raised via set_dec(1) lands here; run it first. */
	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

	now = get_tb_or_rtc();
	if (now >= *next_tb) {
		/*
		 * Event is due: mark no expiry pending (~0) before calling
		 * the handler, which will reprogram via the clockevent API.
		 */
		*next_tb = ~(u64)0;
		if (evt->event_handler)
			evt->event_handler(evt);
		__this_cpu_inc(irq_stat.timer_irqs_event);
	} else {
		/*
		 * Fired early (e.g. for irq_work): reprogram for the time
		 * still remaining, clamped to what the decrementer can hold.
		 */
		now = *next_tb - now;
		if (now <= decrementer_max)
			set_dec(now);
		/* We may have raced with new irq work */
		if (test_irq_work_pending())
			set_dec(1);
		__this_cpu_inc(irq_stat.timer_irqs_others);
	}

	trace_timer_interrupt_exit(regs);
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
	struct pt_regs *old_regs;
	/* Per-CPU deadline used by the clockevent/irq-replay machinery. */
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	/* Ensure a positive value is written to the decrementer, or else
	 * some CPUs will continue to take decrementer exceptions.
	 */
	set_dec(decrementer_max);

	/* Some implementations of hotplug will get timer interrupts while
	 * offline, just ignore these and we also need to set
	 * decrementers_next_tb as MAX to make sure __check_irq_replay
	 * don't replay timer interrupt when return, otherwise we'll trap
	 * here infinitely :(
	 */
	if (!cpu_online(smp_processor_id())) {
		*next_tb = ~(u64)0;
		return;
	}

	/* Conditionally hard-enable interrupts now that the DEC has been
	 * bumped to its maximum value
	 */
	may_hard_irq_enable();


#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
	/* NOTE(review): 32-bit PowerMac only — presumably replays interrupts
	 * that were lost while soft-disabled; confirm against pmac pic code.
	 */
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	/* Publish regs via the per-CPU irq_regs pointer so handlers that
	 * call get_irq_regs() see this frame; restore the old value below.
	 */
	old_regs = set_irq_regs(regs);
	irq_enter();

	/* The actual tick/clockevent work lives in __timer_interrupt(). */
	__timer_interrupt();
	irq_exit();
	set_irq_regs(old_regs);
}
EXPORT_SYMBOL(timer_interrupt);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-07-26 21:56:11 +08:00
|
|
|
/*
 * Hypervisor decrementer interrupts shouldn't occur but are sometimes
 * left pending on exit from a KVM guest. We don't need to do anything
 * to clear them, as they are edge-triggered.
 */
void hdec_interrupt(struct pt_regs *regs)
{
	/* Intentionally empty: simply taking the exception acknowledges it. */
}
|
|
|
|
|
2007-12-13 01:35:19 +08:00
|
|
|
#ifdef CONFIG_SUSPEND
|
2010-06-21 03:04:14 +08:00
|
|
|
static void generic_suspend_disable_irqs(void)
{
	/* Disable the decrementer, so that it doesn't interfere
	 * with suspending.
	 */

	set_dec(decrementer_max);
	local_irq_disable();
	/* Write the decrementer again after irqs are hard-disabled —
	 * presumably to cover a decrementer exception that fired between
	 * the first write and local_irq_disable(); TODO confirm.
	 */
	set_dec(decrementer_max);
}
|
|
|
|
|
2010-06-21 03:04:14 +08:00
|
|
|
/* Re-enable interrupts after suspend; counterpart of
 * generic_suspend_disable_irqs().
 */
static void generic_suspend_enable_irqs(void)
{
	local_irq_enable();
}
|
|
|
|
|
|
|
|
/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
	/* Give the platform its hook first, then do the generic disable. */
	if (ppc_md.suspend_disable_irqs)
		ppc_md.suspend_disable_irqs();
	generic_suspend_disable_irqs();
}
|
|
|
|
|
|
|
|
/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
	/* Generic enable first, then the platform hook — mirror image of
	 * arch_suspend_disable_irqs().
	 */
	generic_suspend_enable_irqs();
	if (ppc_md.suspend_enable_irqs)
		ppc_md.suspend_enable_irqs();
}
|
|
|
|
#endif
|
|
|
|
|
KVM: PPC: Book3S HV: Accumulate timing information for real-mode code
This reads the timebase at various points in the real-mode guest
entry/exit code and uses that to accumulate total, minimum and
maximum time spent in those parts of the code. Currently these
times are accumulated per vcpu in 5 parts of the code:
* rm_entry - time taken from the start of kvmppc_hv_entry() until
just before entering the guest.
* rm_intr - time from when we take a hypervisor interrupt in the
guest until we either re-enter the guest or decide to exit to the
host. This includes time spent handling hcalls in real mode.
* rm_exit - time from when we decide to exit the guest until the
return from kvmppc_hv_entry().
* guest - time spend in the guest
* cede - time spent napping in real mode due to an H_CEDE hcall
while other threads in the same vcore are active.
These times are exposed in debugfs in a directory per vcpu that
contains a file called "timings". This file contains one line for
each of the 5 timings above, with the name followed by a colon and
4 numbers, which are the count (number of times the code has been
executed), the total time, the minimum time, and the maximum time,
all in nanoseconds.
The overhead of the extra code amounts to about 30ns for an hcall that
is handled in real mode (e.g. H_SET_DABR), which is about 25%. Since
production environments may not wish to incur this overhead, the new
code is conditional on a new config symbol,
CONFIG_KVM_BOOK3S_HV_EXIT_TIMING.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
2015-03-28 11:21:02 +08:00
|
|
|
/* Convert a raw timebase tick count to nanoseconds using the
 * precomputed tb_to_ns_scale / tb_to_ns_shift fixed-point factors.
 */
unsigned long long tb_to_ns(unsigned long long ticks)
{
	return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
}
EXPORT_SYMBOL_GPL(tb_to_ns);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
notrace unsigned long long sched_clock(void)
{
	/* Old (601-style) hardware reads the RTC registers directly. */
	if (__USE_RTC())
		return get_rtc();
	/* Otherwise: nanoseconds since boot, derived from the timebase
	 * delta via the fixed-point scale/shift pair.
	 */
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}
|
|
|
|
|
2015-02-13 07:01:28 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_PPC_PSERIES
|
|
|
|
|
|
|
|
/*
 * Running clock - attempts to give a view of time passing for a virtualised
 * kernels.
 * Uses the VTB register if available otherwise a next best guess.
 */
unsigned long long running_clock(void)
{
	/*
	 * Don't read the VTB as a host since KVM does not switch in host
	 * timebase into the VTB when it takes a guest off the CPU, reading the
	 * VTB would result in reading 'last switched out' guest VTB.
	 *
	 * Host kernels are often compiled with CONFIG_PPC_PSERIES checked, it
	 * would be unsafe to rely only on the #ifdef above.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;

	/*
	 * This is a next best approximation without a VTB.
	 * On a host which is running bare metal there should never be any stolen
	 * time and on a host which doesn't do any virtualisation TB *should* equal
	 * VTB so it makes no difference anyway.
	 */
	return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
}
|
|
|
|
#endif
|
|
|
|
|
2006-06-20 16:47:26 +08:00
|
|
|
/*
 * Look up a frequency property on the first "cpu" node of the device tree.
 *
 * @name:  property name (e.g. "timebase-frequency")
 * @cells: number of 32-bit cells making up the property value
 * @val:   out parameter; written only when the property is found
 *
 * Returns 1 if the property was found and *val updated, 0 otherwise.
 */
static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const __be32 *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");
	if (!cpu)
		return 0;

	fp = of_get_property(cpu, name, NULL);
	if (fp) {
		*val = of_read_ulong(fp, cells);
		found = 1;
	}

	of_node_put(cpu);	/* drop the reference taken by the lookup */

	return found;
}
|
|
|
|
|
2014-08-20 06:55:18 +08:00
|
|
|
/* Arm the per-CPU decrementer. Only BookE/40x need explicit TCR/TSR
 * programming; on other cores this is a no-op.
 */
static void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	unsigned int tcr;

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	tcr = mfspr(SPRN_TCR);
	/*
	 * The watchdog may have already been enabled by u-boot. So leave
	 * TCR[WP] (Watchdog Period) alone.
	 */
	tcr &= TCR_WP_MASK;	/* Clear all bits except for TCR[WP] */
	tcr |= TCR_DIE;		/* Enable decrementer */
	mtspr(SPRN_TCR, tcr);
#endif
}
|
|
|
|
|
2006-06-20 16:47:26 +08:00
|
|
|
/* Determine timebase and processor frequencies from the device tree,
 * falling back to hardcoded defaults (with a warning) when no
 * frequency properties can be found.
 */
void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	/* Prefer the 64-bit "ibm,extended-*" property, then the 32-bit one. */
	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}
}
|
|
|
|
|
2007-09-21 11:26:02 +08:00
|
|
|
/* Write the current wall-clock time back to the platform RTC.
 * Returns -ENODEV when no RTC setter is provided by the platform.
 */
int update_persistent_clock(struct timespec now)
{
	struct rtc_time tm;

	if (!ppc_md.set_rtc_time)
		return -ENODEV;

	/* The +1 second bias is deliberate — presumably compensating for
	 * RTC update latency; TODO confirm against set_rtc_time users.
	 */
	to_tm(now.tv_sec + 1 + timezone_offset, &tm);
	tm.tm_year -= 1900;	/* rtc_time wants years since 1900 */
	tm.tm_mon -= 1;		/* rtc_time months are 0-based */

	return ppc_md.set_rtc_time(&tm);
}
|
|
|
|
|
2009-11-02 03:11:03 +08:00
|
|
|
/* Read the boot/persistent time from firmware or the RTC into *ts.
 * On the very first call, also initialize the timezone offset from the
 * platform's time_init hook and prefer get_boot_time() if available.
 * When no RTC accessor exists, ts->tv_sec is left at 0.
 */
static void __read_persistent_clock(struct timespec *ts)
{
	struct rtc_time tm;
	static int first = 1;	/* one-shot init; safe since callers are early/serialized — TODO confirm */

	ts->tv_nsec = 0;
	/* XXX this is a little fragile but will work okay in the short term */
	if (first) {
		first = 0;
		if (ppc_md.time_init)
			timezone_offset = ppc_md.time_init();

		/* get_boot_time() isn't guaranteed to be safe to call late */
		if (ppc_md.get_boot_time) {
			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
			return;
		}
	}
	if (!ppc_md.get_rtc_time) {
		ts->tv_sec = 0;
		return;
	}
	ppc_md.get_rtc_time(&tm);

	/* rtc_time is 1900-based years / 0-based months; mktime wants
	 * absolute year and 1-based month.
	 */
	ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
			    tm.tm_hour, tm.tm_min, tm.tm_sec);
}
|
|
|
|
|
2009-11-02 03:11:03 +08:00
|
|
|
void read_persistent_clock(struct timespec *ts)
|
|
|
|
{
|
|
|
|
__read_persistent_clock(ts);
|
|
|
|
|
|
|
|
/* Sanitize it in case real time clock is set below EPOCH */
|
|
|
|
if (ts->tv_sec < 0) {
|
|
|
|
ts->tv_sec = 0;
|
|
|
|
ts->tv_nsec = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2007-09-22 05:35:52 +08:00
|
|
|
/* clocksource code */

/* clocksource read callback for 601-style RTC-based timekeeping. */
static notrace u64 rtc_read(struct clocksource *cs)
{
	return (u64)get_rtc();
}
|
|
|
|
|
2017-06-20 15:44:47 +08:00
|
|
|
static notrace u64 timebase_read(struct clocksource *cs)
|
2007-09-22 05:35:52 +08:00
|
|
|
{
|
2016-12-22 03:32:01 +08:00
|
|
|
return (u64)get_tb();
|
2007-09-22 05:35:52 +08:00
|
|
|
}
|
|
|
|
|
powerpc: Convert VDSO update function to use new update_vsyscall interface
This converts the powerpc VDSO time update function to use the new
interface introduced in commit 576094b7f0aa ("time: Introduce new
GENERIC_TIME_VSYSCALL", 2012-09-11). Where the old interface gave
us the time as of the last update in seconds and whole nanoseconds,
with the new interface we get the nanoseconds part effectively in
a binary fixed-point format with tk->tkr_mono.shift bits to the
right of the binary point.
With the old interface, the fractional nanoseconds got truncated,
meaning that the value returned by the VDSO clock_gettime function
would have about 1ns of jitter in it compared to the value computed
by the generic timekeeping code in the kernel.
The powerpc VDSO time functions (clock_gettime and gettimeofday)
already work in units of 2^-32 seconds, or 0.23283 ns, because that
makes it simple to split the result into seconds and fractional
seconds, and represent the fractional seconds in either microseconds
or nanoseconds. This is good enough accuracy for now, so this patch
avoids changing how the VDSO works or the interface in the VDSO data
page.
This patch converts the powerpc update_vsyscall_old to be called
update_vsyscall and use the new interface. We convert the fractional
second to units of 2^-32 seconds without truncating to whole nanoseconds.
(There is still a conversion to whole nanoseconds for any legacy users
of the vdso_data/systemcfg stamp_xtime field.)
In addition, this improves the accuracy of the computation of tb_to_xs
for those systems with high-frequency timebase clocks (>= 268.5 MHz)
by doing the right shift in two parts, one before the multiplication and
one after, rather than doing the right shift before the multiplication.
(We can't do all of the right shift after the multiplication unless we
use 128-bit arithmetic.)
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Acked-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2017-05-27 16:04:52 +08:00
|
|
|
|
|
|
|
void update_vsyscall(struct timekeeper *tk)
|
2007-09-22 05:35:52 +08:00
|
|
|
{
|
powerpc: Convert VDSO update function to use new update_vsyscall interface
This converts the powerpc VDSO time update function to use the new
interface introduced in commit 576094b7f0aa ("time: Introduce new
GENERIC_TIME_VSYSCALL", 2012-09-11). Where the old interface gave
us the time as of the last update in seconds and whole nanoseconds,
with the new interface we get the nanoseconds part effectively in
a binary fixed-point format with tk->tkr_mono.shift bits to the
right of the binary point.
With the old interface, the fractional nanoseconds got truncated,
meaning that the value returned by the VDSO clock_gettime function
would have about 1ns of jitter in it compared to the value computed
by the generic timekeeping code in the kernel.
The powerpc VDSO time functions (clock_gettime and gettimeofday)
already work in units of 2^-32 seconds, or 0.23283 ns, because that
makes it simple to split the result into seconds and fractional
seconds, and represent the fractional seconds in either microseconds
or nanoseconds. This is good enough accuracy for now, so this patch
avoids changing how the VDSO works or the interface in the VDSO data
page.
This patch converts the powerpc update_vsyscall_old to be called
update_vsyscall and use the new interface. We convert the fractional
second to units of 2^-32 seconds without truncating to whole nanoseconds.
(There is still a conversion to whole nanoseconds for any legacy users
of the vdso_data/systemcfg stamp_xtime field.)
In addition, this improves the accuracy of the computation of tb_to_xs
for those systems with high-frequency timebase clocks (>= 268.5 MHz)
by doing the right shift in two parts, one before the multiplication and
one after, rather than doing the right shift before the multiplication.
(We can't do all of the right shift after the multiplication unless we
use 128-bit arithmetic.)
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Acked-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2017-05-27 16:04:52 +08:00
|
|
|
struct timespec xt;
|
|
|
|
struct clocksource *clock = tk->tkr_mono.clock;
|
|
|
|
u32 mult = tk->tkr_mono.mult;
|
|
|
|
u32 shift = tk->tkr_mono.shift;
|
|
|
|
u64 cycle_last = tk->tkr_mono.cycle_last;
|
2010-07-14 08:56:21 +08:00
|
|
|
u64 new_tb_to_xs, new_stamp_xsec;
|
powerpc: Convert VDSO update function to use new update_vsyscall interface
This converts the powerpc VDSO time update function to use the new
interface introduced in commit 576094b7f0aa ("time: Introduce new
GENERIC_TIME_VSYSCALL", 2012-09-11). Where the old interface gave
us the time as of the last update in seconds and whole nanoseconds,
with the new interface we get the nanoseconds part effectively in
a binary fixed-point format with tk->tkr_mono.shift bits to the
right of the binary point.
With the old interface, the fractional nanoseconds got truncated,
meaning that the value returned by the VDSO clock_gettime function
would have about 1ns of jitter in it compared to the value computed
by the generic timekeeping code in the kernel.
The powerpc VDSO time functions (clock_gettime and gettimeofday)
already work in units of 2^-32 seconds, or 0.23283 ns, because that
makes it simple to split the result into seconds and fractional
seconds, and represent the fractional seconds in either microseconds
or nanoseconds. This is good enough accuracy for now, so this patch
avoids changing how the VDSO works or the interface in the VDSO data
page.
This patch converts the powerpc update_vsyscall_old to be called
update_vsyscall and use the new interface. We convert the fractional
second to units of 2^-32 seconds without truncating to whole nanoseconds.
(There is still a conversion to whole nanoseconds for any legacy users
of the vdso_data/systemcfg stamp_xtime field.)
In addition, this improves the accuracy of the computation of tb_to_xs
for those systems with high-frequency timebase clocks (>= 268.5 MHz)
by doing the right shift in two parts, one before the multiplication and
one after, rather than doing the right shift before the multiplication.
(We can't do all of the right shift after the multiplication unless we
use 128-bit arithmetic.)
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Acked-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2017-05-27 16:04:52 +08:00
|
|
|
u64 frac_sec;
|
2007-09-22 05:35:52 +08:00
|
|
|
|
|
|
|
if (clock != &clocksource_timebase)
|
|
|
|
return;
|
|
|
|
|
powerpc: Convert VDSO update function to use new update_vsyscall interface
This converts the powerpc VDSO time update function to use the new
interface introduced in commit 576094b7f0aa ("time: Introduce new
GENERIC_TIME_VSYSCALL", 2012-09-11). Where the old interface gave
us the time as of the last update in seconds and whole nanoseconds,
with the new interface we get the nanoseconds part effectively in
a binary fixed-point format with tk->tkr_mono.shift bits to the
right of the binary point.
With the old interface, the fractional nanoseconds got truncated,
meaning that the value returned by the VDSO clock_gettime function
would have about 1ns of jitter in it compared to the value computed
by the generic timekeeping code in the kernel.
The powerpc VDSO time functions (clock_gettime and gettimeofday)
already work in units of 2^-32 seconds, or 0.23283 ns, because that
makes it simple to split the result into seconds and fractional
seconds, and represent the fractional seconds in either microseconds
or nanoseconds. This is good enough accuracy for now, so this patch
avoids changing how the VDSO works or the interface in the VDSO data
page.
This patch converts the powerpc update_vsyscall_old to be called
update_vsyscall and use the new interface. We convert the fractional
second to units of 2^-32 seconds without truncating to whole nanoseconds.
(There is still a conversion to whole nanoseconds for any legacy users
of the vdso_data/systemcfg stamp_xtime field.)
In addition, this improves the accuracy of the computation of tb_to_xs
for those systems with high-frequency timebase clocks (>= 268.5 MHz)
by doing the right shift in two parts, one before the multiplication and
one after, rather than doing the right shift before the multiplication.
(We can't do all of the right shift after the multiplication unless we
use 128-bit arithmetic.)
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Acked-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2017-05-27 16:04:52 +08:00
|
|
|
xt.tv_sec = tk->xtime_sec;
|
|
|
|
xt.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
|
|
|
|
|
2007-09-22 05:35:52 +08:00
|
|
|
/* Make userspace gettimeofday spin until we're done. */
|
|
|
|
++vdso_data->tb_update_count;
|
|
|
|
smp_mb();
|
|
|
|
|
powerpc: Convert VDSO update function to use new update_vsyscall interface
This converts the powerpc VDSO time update function to use the new
interface introduced in commit 576094b7f0aa ("time: Introduce new
GENERIC_TIME_VSYSCALL", 2012-09-11). Where the old interface gave
us the time as of the last update in seconds and whole nanoseconds,
with the new interface we get the nanoseconds part effectively in
a binary fixed-point format with tk->tkr_mono.shift bits to the
right of the binary point.
With the old interface, the fractional nanoseconds got truncated,
meaning that the value returned by the VDSO clock_gettime function
would have about 1ns of jitter in it compared to the value computed
by the generic timekeeping code in the kernel.
The powerpc VDSO time functions (clock_gettime and gettimeofday)
already work in units of 2^-32 seconds, or 0.23283 ns, because that
makes it simple to split the result into seconds and fractional
seconds, and represent the fractional seconds in either microseconds
or nanoseconds. This is good enough accuracy for now, so this patch
avoids changing how the VDSO works or the interface in the VDSO data
page.
This patch converts the powerpc update_vsyscall_old to be called
update_vsyscall and use the new interface. We convert the fractional
second to units of 2^-32 seconds without truncating to whole nanoseconds.
(There is still a conversion to whole nanoseconds for any legacy users
of the vdso_data/systemcfg stamp_xtime field.)
In addition, this improves the accuracy of the computation of tb_to_xs
for those systems with high-frequency timebase clocks (>= 268.5 MHz)
by doing the right shift in two parts, one before the multiplication and
one after, rather than doing the right shift before the multiplication.
(We can't do all of the right shift after the multiplication unless we
use 128-bit arithmetic.)
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Acked-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2017-05-27 16:04:52 +08:00
|
|
|
/*
|
|
|
|
* This computes ((2^20 / 1e9) * mult) >> shift as a
|
|
|
|
* 0.64 fixed-point fraction.
|
|
|
|
* The computation in the else clause below won't overflow
|
|
|
|
* (as long as the timebase frequency is >= 1.049 MHz)
|
|
|
|
* but loses precision because we lose the low bits of the constant
|
|
|
|
* in the shift. Note that 19342813113834067 ~= 2^(20+64) / 1e9.
|
|
|
|
* For a shift of 24 the error is about 0.5e-9, or about 0.5ns
|
|
|
|
* over a second. (Shift values are usually 22, 23 or 24.)
|
|
|
|
* For high frequency clocks such as the 512MHz timebase clock
|
|
|
|
* on POWER[6789], the mult value is small (e.g. 32768000)
|
|
|
|
* and so we can shift the constant by 16 initially
|
|
|
|
* (295147905179 ~= 2^(20+64-16) / 1e9) and then do the
|
|
|
|
* remaining shifts after the multiplication, which gives a
|
|
|
|
* more accurate result (e.g. with mult = 32768000, shift = 24,
|
|
|
|
* the error is only about 1.2e-12, or 0.7ns over 10 minutes).
|
|
|
|
*/
|
|
|
|
if (mult <= 62500000 && clock->shift >= 16)
|
|
|
|
new_tb_to_xs = ((u64) mult * 295147905179ULL) >> (clock->shift - 16);
|
|
|
|
else
|
|
|
|
new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Compute the fractional second in units of 2^-32 seconds.
|
|
|
|
* The fractional second is tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift
|
|
|
|
* in nanoseconds, so multiplying that by 2^32 / 1e9 gives
|
|
|
|
* it in units of 2^-32 seconds.
|
|
|
|
* We assume shift <= 32 because clocks_calc_mult_shift()
|
|
|
|
* generates shift values in the range 0 - 32.
|
|
|
|
*/
|
|
|
|
frac_sec = tk->tkr_mono.xtime_nsec << (32 - shift);
|
|
|
|
do_div(frac_sec, NSEC_PER_SEC);
|
2010-07-14 08:56:21 +08:00
|
|
|
|
powerpc: Convert VDSO update function to use new update_vsyscall interface
This converts the powerpc VDSO time update function to use the new
interface introduced in commit 576094b7f0aa ("time: Introduce new
GENERIC_TIME_VSYSCALL", 2012-09-11). Where the old interface gave
us the time as of the last update in seconds and whole nanoseconds,
with the new interface we get the nanoseconds part effectively in
a binary fixed-point format with tk->tkr_mono.shift bits to the
right of the binary point.
With the old interface, the fractional nanoseconds got truncated,
meaning that the value returned by the VDSO clock_gettime function
would have about 1ns of jitter in it compared to the value computed
by the generic timekeeping code in the kernel.
The powerpc VDSO time functions (clock_gettime and gettimeofday)
already work in units of 2^-32 seconds, or 0.23283 ns, because that
makes it simple to split the result into seconds and fractional
seconds, and represent the fractional seconds in either microseconds
or nanoseconds. This is good enough accuracy for now, so this patch
avoids changing how the VDSO works or the interface in the VDSO data
page.
This patch converts the powerpc update_vsyscall_old to be called
update_vsyscall and use the new interface. We convert the fractional
second to units of 2^-32 seconds without truncating to whole nanoseconds.
(There is still a conversion to whole nanoseconds for any legacy users
of the vdso_data/systemcfg stamp_xtime field.)
In addition, this improves the accuracy of the computation of tb_to_xs
for those systems with high-frequency timebase clocks (>= 268.5 MHz)
by doing the right shift in two parts, one before the multiplication and
one after, rather than doing the right shift before the multiplication.
(We can't do all of the right shift after the multiplication unless we
use 128-bit arithmetic.)
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Acked-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2017-05-27 16:04:52 +08:00
|
|
|
/*
|
|
|
|
* Work out new stamp_xsec value for any legacy users of systemcfg.
|
|
|
|
* stamp_xsec is in units of 2^-20 seconds.
|
|
|
|
*/
|
|
|
|
new_stamp_xsec = frac_sec >> 12;
|
|
|
|
new_stamp_xsec += tk->xtime_sec * XSEC_PER_SEC;
|
2010-07-29 03:49:22 +08:00
|
|
|
|
2010-07-14 08:56:21 +08:00
|
|
|
/*
|
|
|
|
* tb_update_count is used to allow the userspace gettimeofday code
|
|
|
|
* to assure itself that it sees a consistent view of the tb_to_xs and
|
|
|
|
* stamp_xsec variables. It reads the tb_update_count, then reads
|
|
|
|
* tb_to_xs and stamp_xsec and then reads tb_update_count again. If
|
|
|
|
* the two values of tb_update_count match and are even then the
|
|
|
|
* tb_to_xs and stamp_xsec values are consistent. If not, then it
|
|
|
|
* loops back and reads them again until this criteria is met.
|
|
|
|
*/
|
2014-07-17 05:05:13 +08:00
|
|
|
vdso_data->tb_orig_stamp = cycle_last;
|
2010-07-14 08:56:21 +08:00
|
|
|
vdso_data->stamp_xsec = new_stamp_xsec;
|
|
|
|
vdso_data->tb_to_xs = new_tb_to_xs;
|
powerpc: Convert VDSO update function to use new update_vsyscall interface
This converts the powerpc VDSO time update function to use the new
interface introduced in commit 576094b7f0aa ("time: Introduce new
GENERIC_TIME_VSYSCALL", 2012-09-11). Where the old interface gave
us the time as of the last update in seconds and whole nanoseconds,
with the new interface we get the nanoseconds part effectively in
a binary fixed-point format with tk->tkr_mono.shift bits to the
right of the binary point.
With the old interface, the fractional nanoseconds got truncated,
meaning that the value returned by the VDSO clock_gettime function
would have about 1ns of jitter in it compared to the value computed
by the generic timekeeping code in the kernel.
The powerpc VDSO time functions (clock_gettime and gettimeofday)
already work in units of 2^-32 seconds, or 0.23283 ns, because that
makes it simple to split the result into seconds and fractional
seconds, and represent the fractional seconds in either microseconds
or nanoseconds. This is good enough accuracy for now, so this patch
avoids changing how the VDSO works or the interface in the VDSO data
page.
This patch converts the powerpc update_vsyscall_old to be called
update_vsyscall and use the new interface. We convert the fractional
second to units of 2^-32 seconds without truncating to whole nanoseconds.
(There is still a conversion to whole nanoseconds for any legacy users
of the vdso_data/systemcfg stamp_xtime field.)
In addition, this improves the accuracy of the computation of tb_to_xs
for those systems with high-frequency timebase clocks (>= 268.5 MHz)
by doing the right shift in two parts, one before the multiplication and
one after, rather than doing the right shift before the multiplication.
(We can't do all of the right shift after the multiplication unless we
use 128-bit arithmetic.)
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Acked-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2017-05-27 16:04:52 +08:00
|
|
|
vdso_data->wtom_clock_sec = tk->wall_to_monotonic.tv_sec;
|
|
|
|
vdso_data->wtom_clock_nsec = tk->wall_to_monotonic.tv_nsec;
|
|
|
|
vdso_data->stamp_xtime = xt;
|
powerpc: Rework VDSO gettimeofday to prevent time going backwards
Currently it is possible for userspace to see the result of
gettimeofday() going backwards by 1 microsecond, assuming that
userspace is using the gettimeofday() in the VDSO. The VDSO
gettimeofday() algorithm computes the time in "xsecs", which are
units of 2^-20 seconds, or approximately 0.954 microseconds,
using the algorithm
now = (timebase - tb_orig_stamp) * tb_to_xs + stamp_xsec
and then converts the time in xsecs to seconds and microseconds.
The kernel updates the tb_orig_stamp and stamp_xsec values every
tick in update_vsyscall(). If the length of the tick is not an
integer number of xsecs, then some precision is lost in converting
the current time to xsecs. For example, with CONFIG_HZ=1000, the
tick is 1ms long, which is 1048.576 xsecs. That means that
stamp_xsec will advance by either 1048 or 1049 on each tick.
With the right conditions, it is possible for userspace to get
(timebase - tb_orig_stamp) * tb_to_xs being 1049 if the kernel is
slightly late in updating the vdso_datapage, and then for stamp_xsec
to advance by 1048 when the kernel does update it, and for userspace
to then see (timebase - tb_orig_stamp) * tb_to_xs being zero due to
integer truncation. The result is that time appears to go backwards
by 1 microsecond.
To fix this we change the VDSO gettimeofday to use a new field in the
VDSO datapage which stores the nanoseconds part of the time as a
fractional number of seconds in a 0.32 binary fraction format.
(Or put another way, as a 32-bit number in units of 0.23283 ns.)
This is convenient because we can use the mulhwu instruction to
convert it to either microseconds or nanoseconds.
Since it turns out that computing the time of day using this new field
is simpler than either using stamp_xsec (as gettimeofday does) or
stamp_xtime.tv_nsec (as clock_gettime does), this converts both
gettimeofday and clock_gettime to use the new field. The existing
__do_get_tspec function is converted to use the new field and take
a parameter in r7 that indicates the desired resolution, 1,000,000
for microseconds or 1,000,000,000 for nanoseconds. The __do_get_xsec
function is then unused and is deleted.
The new algorithm is
now = ((timebase - tb_orig_stamp) << 12) * tb_to_xs
+ (stamp_xtime_seconds << 32) + stamp_sec_fraction
with 'now' in units of 2^-32 seconds. That is then converted to
seconds and either microseconds or nanoseconds with
seconds = now >> 32
partseconds = ((now & 0xffffffff) * resolution) >> 32
The 32-bit VDSO code also makes a further simplification: it ignores
the bottom 32 bits of the tb_to_xs value, which is a 0.64 format binary
fraction. Doing so gets rid of 4 multiply instructions. Assuming
a timebase frequency of 1GHz or less and an update interval of no
more than 10ms, the upper 32 bits of tb_to_xs will be at least
4503599, so the error from ignoring the low 32 bits will be at most
2.2ns, which is more than an order of magnitude less than the time
taken to do gettimeofday or clock_gettime on our fastest processors,
so there is no possibility of seeing inconsistent values due to this.
This also moves update_gtod() down next to its only caller, and makes
update_vsyscall use the time passed in via the wall_time argument rather
than accessing xtime directly. At present, wall_time always points to
xtime, but that could change in future.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2010-06-21 03:03:08 +08:00
|
|
|
vdso_data->stamp_sec_fraction = frac_sec;
|
2010-07-14 08:56:21 +08:00
|
|
|
smp_wmb();
|
|
|
|
++(vdso_data->tb_update_count);
|
2007-09-22 05:35:52 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Publish the current kernel timezone (sys_tz) into the VDSO data page
 * so userspace gettimeofday() can pick it up without a system call.
 *
 * NOTE(review): unlike the time update path, no tb_update_count /
 * barrier protocol is used here — presumably a torn read of the two
 * timezone fields is considered harmless; confirm against the VDSO
 * reader before relying on that.
 */
void update_vsyscall_tz(void)
{
	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
}
|
|
|
|
|
2008-05-08 12:27:19 +08:00
|
|
|
/*
 * Select and register the boot clocksource: the RTC-based source when
 * __USE_RTC() is true (601-class processors), otherwise the timebase
 * register.  tb_ticks_per_sec must already have been set (time_init()
 * does this before calling us).
 */
static void __init clocksource_init(void)
{
	struct clocksource *clock;

	if (__USE_RTC())
		clock = &clocksource_rtc;
	else
		clock = &clocksource_timebase;

	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
		printk(KERN_ERR "clocksource: %s is already registered\n",
		       clock->name);
		return;
	}

	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
	       clock->name, clock->mult, clock->shift);
}
|
|
|
|
|
2007-09-21 11:26:03 +08:00
|
|
|
/*
 * clock_event_device ->set_next_event hook: program the decrementer so
 * the next timer interrupt fires 'evt' timebase ticks from now.
 *
 * Returns 0 (the clockevents core treats this as success).
 */
static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
{
	/* Record, in timebase units, when this CPU's next event is due */
	__this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
	set_dec(evt);

	/* We may have raced with new irq work */
	if (test_irq_work_pending())
		set_dec(1);

	return 0;
}
|
|
|
|
|
2015-07-16 19:26:25 +08:00
|
|
|
/*
 * clock_event_device shutdown hook: quiesce the decrementer by
 * programming the longest possible interval (decrementer_max) rather
 * than stopping it outright.
 */
static int decrementer_shutdown(struct clock_event_device *dev)
{
	decrementer_set_next_event(decrementer_max, dev);
	return 0;
}
|
|
|
|
|
2014-02-26 08:07:43 +08:00
|
|
|
/* Interrupt handler for the timer broadcast IPI */
void tick_broadcast_ipi_handler(void)
{
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	/*
	 * The broadcast event is due now: mark this CPU's next-event
	 * timestamp as "now" and run the common timer interrupt path.
	 */
	*next_tb = get_tb_or_rtc();
	__timer_interrupt();
}
|
|
|
|
|
2007-09-21 11:26:03 +08:00
|
|
|
/*
 * Copy the decrementer_clockevent template into the given CPU's
 * per-cpu clock_event_device, bind it to that CPU, and register it
 * with the clockevents core.
 */
static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu);

	*dec = decrementer_clockevent;
	dec->cpumask = cpumask_of(cpu);

	/* printk_once: log the parameters only for the first CPU */
	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
		    dec->name, dec->mult, dec->shift, cpu);

	clockevents_register_device(dec);
}
|
|
|
|
|
2016-07-01 14:20:39 +08:00
|
|
|
/*
 * Enable the large (> 32-bit) decrementer on this CPU, if present.
 * Only ISA v3.00 (CPU_FTR_ARCH_300) parts can have one, and it is only
 * worth enabling when set_decrementer_max() found a width above the
 * default; both checks bail out early otherwise.
 */
static void enable_large_decrementer(void)
{
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return;

	if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
		return;

	/*
	 * If we're running as the hypervisor we need to enable the LD manually
	 * otherwise firmware should have done it for us.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
}
|
|
|
|
|
|
|
|
/*
 * Work out the maximum value the decrementer can be programmed with.
 *
 * Prior to ISA v3.00 (CPU_FTR_ARCH_300) the decrementer is always
 * 32 bits and decrementer_max keeps its default.  On newer CPUs the
 * firmware advertises the width via the "ibm,dec-bits" property of a
 * cpu node; values outside 32-64 are rejected and fall back to 32.
 */
static void __init set_decrementer_max(void)
{
	struct device_node *cpu;
	u32 bits = 32;

	/* Prior to ISAv3 the decrementer is always 32 bit */
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return;

	cpu = of_find_node_by_type(NULL, "cpu");

	if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
		if (bits > 64 || bits < 32) {
			/* Fix: the warning lacked its terminating newline,
			 * so the next printk was glued onto this line. */
			pr_warn("time_init: firmware supplied invalid ibm,dec-bits\n");
			bits = 32;
		}

		/* calculate the signed maximum given this many bits */
		decrementer_max = (1ul << (bits - 1)) - 1;
	}

	/* of_node_put() accepts NULL, so no check on 'cpu' is needed */
	of_node_put(cpu);

	pr_info("time_init: %u bit decrementer (max: %llx)\n",
		bits, decrementer_max);
}
|
|
|
|
|
2007-12-14 12:52:10 +08:00
|
|
|
/*
 * Compute the mult/shift conversion factors for the decrementer
 * clockevent from the timebase frequency, fill in its min/max event
 * deltas, and register it for the boot CPU.
 */
static void __init init_decrementer_clockevent(void)
{
	int cpu = smp_processor_id();

	/* 4 is the conversion-range argument in seconds — see
	 * clockevents_calc_mult_shift(); TODO confirm intent. */
	clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);

	decrementer_clockevent.max_delta_ns =
		clockevent_delta2ns(decrementer_max, &decrementer_clockevent);
	decrementer_clockevent.max_delta_ticks = decrementer_max;
	decrementer_clockevent.min_delta_ns =
		clockevent_delta2ns(2, &decrementer_clockevent);
	decrementer_clockevent.min_delta_ticks = 2;

	register_decrementer_clockevent(cpu);
}
|
|
|
|
|
|
|
|
/*
 * Per-CPU timer bring-up for secondary CPUs: mirror the decrementer
 * setup that time_init() performed on the boot CPU.
 */
void secondary_cpu_time_init(void)
{
	/* Enable and test the large decrementer for this cpu */
	enable_large_decrementer();

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* FIXME: Should make unrelated change to move snapshot_timebase
	 * call here ! */
	register_decrementer_clockevent(smp_processor_id());
}
|
|
|
|
|
2005-10-20 07:23:26 +08:00
|
|
|
/* This function is only called on the boot processor */
void __init time_init(void)
{
	struct div_result res;
	u64 scale;
	unsigned shift;

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
	}

	/* Derive the per-jiffy / per-second / per-usec tick counts from
	 * the timebase frequency established above. */
	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	calc_cputime_factors();

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;

	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
	boot_tb = get_tb_or_rtc();

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
	}

	/* Initialise the fields of the VDSO data page used by userspace */
	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;

	/* initialise and enable the large decrementer (if we have one) */
	set_decrementer_max();
	enable_large_decrementer();

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* Register the clocksource */
	clocksource_init();

	init_decrementer_clockevent();
	tick_setup_hrtimer_broadcast();

#ifdef CONFIG_COMMON_CLK
	of_clk_init(NULL);
#endif
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Calendar helpers used by to_tm() below. */
#define FEBRUARY 2
#define STARTOFTIME 1970
#define SECDAY 86400L
#define SECYR (SECDAY * 365)
/* Full Gregorian rule: every 4th year, except centuries not divisible by 400. */
#define leapyear(year) ((year) % 4 == 0 && \
		((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a) (leapyear(a) ? 366 : 365)
/* 1-based month index (1 = January). */
#define days_in_month(a) (month_days[(a) - 1])

/* Days per month for a non-leap year (February = 28).  Deliberately
 * non-const: see the leap-year handling in to_tm(). */
static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
|
|
|
|
|
|
|
|
void to_tm(int tim, struct rtc_time * tm)
|
|
|
|
{
|
|
|
|
register int i;
|
|
|
|
register long hms, day;
|
|
|
|
|
|
|
|
day = tim / SECDAY;
|
|
|
|
hms = tim % SECDAY;
|
|
|
|
|
|
|
|
/* Hours, minutes, seconds are easy */
|
|
|
|
tm->tm_hour = hms / 3600;
|
|
|
|
tm->tm_min = (hms % 3600) / 60;
|
|
|
|
tm->tm_sec = (hms % 3600) % 60;
|
|
|
|
|
|
|
|
/* Number of years in days */
|
|
|
|
for (i = STARTOFTIME; day >= days_in_year(i); i++)
|
|
|
|
day -= days_in_year(i);
|
|
|
|
tm->tm_year = i;
|
|
|
|
|
|
|
|
/* Number of months in days left */
|
|
|
|
if (leapyear(tm->tm_year))
|
|
|
|
days_in_month(FEBRUARY) = 29;
|
|
|
|
for (i = 1; day >= days_in_month(i); i++)
|
|
|
|
day -= days_in_month(i);
|
|
|
|
days_in_month(FEBRUARY) = 28;
|
|
|
|
tm->tm_mon = i;
|
|
|
|
|
|
|
|
/* Days are what is left over (+1) from all that. */
|
|
|
|
tm->tm_mday = day + 1;
|
|
|
|
|
|
|
|
/*
|
2015-12-15 15:09:14 +08:00
|
|
|
* No-one uses the day of the week.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2015-12-15 15:09:14 +08:00
|
|
|
tm->tm_wday = -1;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2014-08-20 06:00:02 +08:00
|
|
|
EXPORT_SYMBOL(to_tm);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 *
 * Schoolbook long division in base 2^32: the dividend is split into
 * four 32-bit "digits" a:b:c:d (most significant first), and each step
 * divides (remainder-so-far << 32 | next digit) by the divisor to
 * produce one 32-bit quotient digit w, x, y or z.
 *
 * NOTE: do_div(n, base) is a macro that divides n in place (n becomes
 * the quotient) and RETURNS the remainder — hence the statement
 * ordering below is significant and must not be rearranged.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;	/* the four base-2^32 digits of the dividend */
	unsigned long w, x, y, z;	/* the four base-2^32 digits of the quotient */
	u64 ra, rb, rc;			/* partial dividends: (remainder << 32) + next digit */

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	/* Top digit fits in 64-bit arithmetic directly. */
	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	/* do_div leaves the quotient digit in ra and yields the remainder. */
	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	/* Reassemble the four quotient digits into a 128-bit result. */
	dr->result_high = ((u64)w << 32) + x;
	dr->result_low = ((u64)y << 32) + z;
}
|
2009-02-19 23:50:46 +08:00
|
|
|
|
2009-06-10 05:12:00 +08:00
|
|
|
/* We don't need to calibrate delay, we use the CPU timebase for that */
|
|
|
|
void calibrate_delay(void)
|
|
|
|
{
|
|
|
|
/* Some generic code (such as spinlock debug) use loops_per_jiffy
|
|
|
|
* as the number of __delay(1) in a jiffy, so make it so
|
|
|
|
*/
|
|
|
|
loops_per_jiffy = tb_ticks_per_jiffy;
|
|
|
|
}
|
|
|
|
|
2016-05-31 02:58:00 +08:00
|
|
|
#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
|
|
|
|
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
	/*
	 * rtc_init() only registers this device when
	 * ppc_md.get_rtc_time is non-NULL, so the hook can be called
	 * unconditionally here.
	 */
	ppc_md.get_rtc_time(tm);

	return 0;
}
|
|
|
|
|
|
|
|
static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
|
|
|
|
{
|
|
|
|
if (!ppc_md.set_rtc_time)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
if (ppc_md.set_rtc_time(tm) < 0)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* RTC class operations backed by the platform's ppc_md hooks. */
static const struct rtc_class_ops rtc_generic_ops = {
	.read_time = rtc_generic_get_time,
	.set_time = rtc_generic_set_time,
};
|
|
|
|
|
2009-02-19 23:50:46 +08:00
|
|
|
/*
 * Register a generic RTC platform device wired to the ppc_md time
 * hooks, if this platform provides a way to read the RTC at all.
 */
static int __init rtc_init(void)
{
	struct platform_device *pdev;

	/* Without a platform read hook there is no RTC to expose. */
	if (!ppc_md.get_rtc_time)
		return -ENODEV;

	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
					     &rtc_generic_ops,
					     sizeof(rtc_generic_ops));
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	return 0;
}

device_initcall(rtc_init);
|
2016-05-31 02:58:00 +08:00
|
|
|
#endif
|