Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer changes from Thomas Gleixner:
 "This assorted collection provides:

   - A new timer based timer broadcast feature for systems which do not
     provide a global accessible timer device.  That allows those
     systems to put CPUs into deep idle states where the per cpu timer
     device stops.

   - A few NOHZ_FULL related improvements to the timer wheel

   - The usual updates to timer devices found in ARM SoCs

   - Small improvements and updates all over the place"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (44 commits)
  tick: Remove code duplication in tick_handle_periodic()
  tick: Fix spelling mistake in tick_handle_periodic()
  x86: hpet: Use proper destructor for delayed work
  workqueue: Provide destroy_delayed_work_on_stack()
  clocksource: CMT, MTU2, TMU and STI should depend on GENERIC_CLOCKEVENTS
  timer: Remove code redundancy while calling get_nohz_timer_target()
  hrtimer: Rearrange comments in the order struct members are declared
  timer: Use variable head instead of &work_list in __run_timers()
  clocksource: exynos_mct: silence a static checker warning
  arm: zynq: Add support for cpufreq
  arm: zynq: Don't use arm_global_timer with cpufreq
  clocksource/cadence_ttc: Overhaul clocksource frequency adjustment
  clocksource/cadence_ttc: Call clockevents_update_freq() with IRQs enabled
  clocksource: Add Kconfig entries for CMT, MTU2, TMU and STI
  sh: Remove Kconfig entries for TMU, CMT and MTU2
  ARM: shmobile: Remove CMT, TMU and STI Kconfig entries
  clocksource: armada-370-xp: Use atomic access for shared registers
  clocksource: orion: Use atomic access for shared registers
  clocksource: timer-keystone: Delete unnecessary variable
  clocksource: timer-keystone: introduce clocksource driver for Keystone
  ...
commit 1ead658124
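The broadcast feature summarized above relies on clockevents_notify() gaining a return value (see the include/linux/clockchips.h and drivers/cpuidle/cpuidle.c hunks below). A minimal sketch of the resulting calling pattern follows; the wrapper function enter_deep_idle() is hypothetical and only the flag names and clockevents_notify() itself come from this merge:

#include <linux/clockchips.h>
#include <linux/cpuidle.h>

/*
 * Illustrative only: with this series clockevents_notify() returns an
 * int, so a caller can detect that the switch to the (possibly
 * hrtimer based) broadcast device failed and refuse to enter an idle
 * state that stops the per cpu timer.
 */
static int enter_deep_idle(struct cpuidle_device *dev, bool broadcast)
{
	if (broadcast &&
	    clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu))
		return -EBUSY;	/* broadcast takeover failed, skip deep idle */

	/* ... enter the idle state here ... */

	if (broadcast)
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);

	return 0;
}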
@@ -2,7 +2,7 @@ Allwinner A1X SoCs Timer Controller
 
 Required properties:
 
-- compatible : should be "allwinner,sun4i-timer"
+- compatible : should be "allwinner,sun4i-a10-timer"
 - reg : Specifies base physical address and size of the registers.
 - interrupts : The interrupt of the first timer
 - clocks: phandle to the source clock (usually a 24 MHz fixed clock)
@@ -10,7 +10,7 @@ Required properties:
 Example:
 
 timer {
-	compatible = "allwinner,sun4i-timer";
+	compatible = "allwinner,sun4i-a10-timer";
 	reg = <0x01c20c00 0x400>;
 	interrupts = <22>;
 	clocks = <&osc>;
@@ -0,0 +1,29 @@
+* Device tree bindings for Texas Instruments Keystone timer
+
+This document provides bindings for the 64-bit timer in the KeyStone
+architecture devices. The timer can be configured as a general-purpose 64-bit
+timer or as dual general-purpose 32-bit timers. When configured as dual 32-bit
+timers, the two halves can operate either in conjunction (chain mode) or
+independently (unchained mode) of each other.
+
+The global timer is a free-running up-counter and can generate an interrupt
+when the counter reaches preset counter values.
+
+Documentation:
+http://www.ti.com/lit/ug/sprugv5a/sprugv5a.pdf
+
+Required properties:
+
+- compatible : should be "ti,keystone-timer".
+- reg : specifies base physical address and count of the registers.
+- interrupts : interrupt generated by the timer.
+- clocks : the clock feeding the timer clock.
+
+Example:
+
+timer@22f0000 {
+	compatible = "ti,keystone-timer";
+	reg = <0x022f0000 0x80>;
+	interrupts = <GIC_SPI 110 IRQ_TYPE_EDGE_RISING>;
+	clocks = <&clktimer15>;
+};
@@ -1320,6 +1320,7 @@ M:	Linus Walleij <linus.walleij@linaro.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
 F:	arch/arm/mach-u300/
+F:	drivers/clocksource/timer-u300.c
 F:	drivers/i2c/busses/i2c-stu300.c
 F:	drivers/rtc/rtc-coh901331.c
 F:	drivers/watchdog/coh901327_wdt.c
@@ -403,7 +403,7 @@
 		};
 
 		timer@01c20c00 {
-			compatible = "allwinner,sun4i-timer";
+			compatible = "allwinner,sun4i-a10-timer";
 			reg = <0x01c20c00 0x90>;
 			interrupts = <22>;
 			clocks = <&osc24M>;
@@ -366,7 +366,7 @@
 		};
 
 		timer@01c20c00 {
-			compatible = "allwinner,sun4i-timer";
+			compatible = "allwinner,sun4i-a10-timer";
 			reg = <0x01c20c00 0x90>;
 			interrupts = <22>;
 			clocks = <&osc24M>;
@@ -329,7 +329,7 @@
 		};
 
 		timer@01c20c00 {
-			compatible = "allwinner,sun4i-timer";
+			compatible = "allwinner,sun4i-a10-timer";
 			reg = <0x01c20c00 0x90>;
 			interrupts = <22>;
 			clocks = <&osc24M>;
@@ -231,7 +231,7 @@
 		};
 
 		timer@01c20c00 {
-			compatible = "allwinner,sun4i-timer";
+			compatible = "allwinner,sun4i-a10-timer";
 			reg = <0x01c20c00 0xa0>;
 			interrupts = <0 18 4>,
 				     <0 19 4>,
@@ -435,7 +435,7 @@
 		};
 
 		timer@01c20c00 {
-			compatible = "allwinner,sun4i-timer";
+			compatible = "allwinner,sun4i-a10-timer";
 			reg = <0x01c20c00 0x90>;
 			interrupts = <0 22 4>,
 				     <0 23 4>,
@@ -24,6 +24,12 @@
 			device_type = "cpu";
 			reg = <0>;
 			clocks = <&clkc 3>;
+			operating-points = <
+				/* kHz    uV */
+				666667  1000000
+				333334  1000000
+				222223  1000000
+			>;
 		};
 
 		cpu@1 {
@@ -24,17 +24,21 @@ comment "Renesas ARM SoCs System Type"
 
 config ARCH_EMEV2
 	bool "Emma Mobile EV2"
+	select SYS_SUPPORTS_EM_STI
 
 config ARCH_R7S72100
 	bool "RZ/A1H (R7S72100)"
+	select SYS_SUPPORTS_SH_MTU2
 
 config ARCH_R8A7790
 	bool "R-Car H2 (R8A77900)"
 	select RENESAS_IRQC
+	select SYS_SUPPORTS_SH_CMT
 
 config ARCH_R8A7791
 	bool "R-Car M2 (R8A77910)"
 	select RENESAS_IRQC
+	select SYS_SUPPORTS_SH_CMT
 
 comment "Renesas ARM SoCs Board Type"
 
@@ -68,6 +72,8 @@ config ARCH_SH7372
 	select ARM_CPU_SUSPEND if PM || CPU_IDLE
 	select CPU_V7
 	select SH_CLK_CPG
+	select SYS_SUPPORTS_SH_CMT
+	select SYS_SUPPORTS_SH_TMU
 
 config ARCH_SH73A0
 	bool "SH-Mobile AG5 (R8A73A00)"
@@ -77,6 +83,8 @@ config ARCH_SH73A0
 	select I2C
 	select SH_CLK_CPG
 	select RENESAS_INTC_IRQPIN
+	select SYS_SUPPORTS_SH_CMT
+	select SYS_SUPPORTS_SH_TMU
 
 config ARCH_R8A73A4
 	bool "R-Mobile APE6 (R8A73A40)"
@@ -87,6 +95,8 @@ config ARCH_R8A73A4
 	select RENESAS_IRQC
 	select ARCH_HAS_CPUFREQ
 	select ARCH_HAS_OPP
+	select SYS_SUPPORTS_SH_CMT
+	select SYS_SUPPORTS_SH_TMU
 
 config ARCH_R8A7740
 	bool "R-Mobile A1 (R8A77400)"
@@ -95,6 +105,8 @@ config ARCH_R8A7740
 	select CPU_V7
 	select SH_CLK_CPG
 	select RENESAS_INTC_IRQPIN
+	select SYS_SUPPORTS_SH_CMT
+	select SYS_SUPPORTS_SH_TMU
 
 config ARCH_R8A7778
 	bool "R-Car M1A (R8A77781)"
@@ -104,6 +116,7 @@ config ARCH_R8A7778
 	select ARM_GIC
 	select USB_ARCH_HAS_EHCI
 	select USB_ARCH_HAS_OHCI
+	select SYS_SUPPORTS_SH_TMU
 
 config ARCH_R8A7779
 	bool "R-Car H1 (R8A77790)"
@@ -114,6 +127,7 @@ config ARCH_R8A7779
 	select USB_ARCH_HAS_EHCI
 	select USB_ARCH_HAS_OHCI
 	select RENESAS_INTC_IRQPIN
+	select SYS_SUPPORTS_SH_TMU
 
 config ARCH_R8A7790
 	bool "R-Car H2 (R8A77900)"
@@ -123,6 +137,7 @@ config ARCH_R8A7790
 	select MIGHT_HAVE_PCI
 	select SH_CLK_CPG
 	select RENESAS_IRQC
+	select SYS_SUPPORTS_SH_CMT
 
 config ARCH_R8A7791
 	bool "R-Car M2 (R8A77910)"
@@ -132,6 +147,7 @@ config ARCH_R8A7791
 	select MIGHT_HAVE_PCI
 	select SH_CLK_CPG
 	select RENESAS_IRQC
+	select SYS_SUPPORTS_SH_CMT
 
 config ARCH_EMEV2
 	bool "Emma Mobile EV2"
@@ -141,6 +157,7 @@ config ARCH_EMEV2
 	select MIGHT_HAVE_PCI
 	select USE_OF
 	select AUTO_ZRELADDR
+	select SYS_SUPPORTS_EM_STI
 
 config ARCH_R7S72100
 	bool "RZ/A1H (R7S72100)"
@@ -148,6 +165,7 @@ config ARCH_R7S72100
 	select ARM_GIC
 	select CPU_V7
 	select SH_CLK_CPG
+	select SYS_SUPPORTS_SH_MTU2
 
 comment "Renesas ARM SoCs Board Type"
 
@@ -321,24 +339,6 @@ config SHMOBILE_TIMER_HZ
 	  want to select a HZ value such as 128 that can evenly divide RCLK.
 	  A HZ value that does not divide evenly may cause timer drift.
 
-config SH_TIMER_CMT
-	bool "CMT timer driver"
-	default y
-	help
-	  This enables build of the CMT timer driver.
-
-config SH_TIMER_TMU
-	bool "TMU timer driver"
-	default y
-	help
-	  This enables build of the TMU timer driver.
-
-config EM_TIMER_STI
-	bool "STI timer driver"
-	default y
-	help
-	  This enables build of the STI timer driver.
-
 endmenu
 
 endif
@@ -2,7 +2,7 @@
 # Makefile for the linux kernel, U300 machine.
 #
 
-obj-y		:= core.o timer.o
+obj-y		:= core.o
 obj-m		:=
 obj-n		:=
 obj-		:=
@@ -2,6 +2,8 @@ config ARCH_ZYNQ
 	bool "Xilinx Zynq ARM Cortex A9 Platform" if ARCH_MULTI_V7
 	select ARM_AMBA
 	select ARM_GIC
+	select ARCH_HAS_CPUFREQ
+	select ARCH_HAS_OPP
 	select COMMON_CLK
 	select CPU_V7
 	select GENERIC_CLOCKEVENTS
@@ -13,6 +15,6 @@ config ARCH_ZYNQ
 	select HAVE_SMP
 	select SPARSE_IRQ
 	select CADENCE_TTC_TIMER
-	select ARM_GLOBAL_TIMER
+	select ARM_GLOBAL_TIMER if !CPU_FREQ
 	help
 	  Support for Xilinx Zynq ARM Cortex A9 Platform
@@ -64,6 +64,8 @@ static struct platform_device zynq_cpuidle_device = {
  */
 static void __init zynq_init_machine(void)
 {
+	struct platform_device_info devinfo = { .name = "cpufreq-cpu0", };
+
 	/*
 	 * 64KB way size, 8-way associativity, parity disabled
 	 */
@@ -72,6 +74,7 @@ static void __init zynq_init_machine(void)
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 
 	platform_device_register(&zynq_cpuidle_device);
+	platform_device_register_full(&devinfo);
 }
 
 static void __init zynq_timer_init(void)
@@ -123,15 +123,6 @@ config SYS_SUPPORTS_NUMA
 config SYS_SUPPORTS_PCI
 	bool
 
-config SYS_SUPPORTS_CMT
-	bool
-
-config SYS_SUPPORTS_MTU2
-	bool
-
-config SYS_SUPPORTS_TMU
-	bool
-
 config STACKTRACE_SUPPORT
 	def_bool y
 
@@ -191,14 +182,14 @@ config CPU_SH3
 	bool
 	select CPU_HAS_INTEVT
 	select CPU_HAS_SR_RB
-	select SYS_SUPPORTS_TMU
+	select SYS_SUPPORTS_SH_TMU
 
 config CPU_SH4
 	bool
 	select CPU_HAS_INTEVT
 	select CPU_HAS_SR_RB
 	select CPU_HAS_FPU if !CPU_SH4AL_DSP
-	select SYS_SUPPORTS_TMU
+	select SYS_SUPPORTS_SH_TMU
 	select SYS_SUPPORTS_HUGETLBFS if MMU
 
 config CPU_SH4A
@@ -213,7 +204,7 @@ config CPU_SH4AL_DSP
 config CPU_SH5
 	bool
 	select CPU_HAS_FPU
-	select SYS_SUPPORTS_TMU
+	select SYS_SUPPORTS_SH_TMU
 	select SYS_SUPPORTS_HUGETLBFS if MMU
 
 config CPU_SHX2
@@ -250,7 +241,7 @@ choice
 config CPU_SUBTYPE_SH7619
 	bool "Support SH7619 processor"
 	select CPU_SH2
-	select SYS_SUPPORTS_CMT
+	select SYS_SUPPORTS_SH_CMT
 
 # SH-2A Processor Support
 
@@ -258,50 +249,50 @@ config CPU_SUBTYPE_SH7201
 	bool "Support SH7201 processor"
 	select CPU_SH2A
 	select CPU_HAS_FPU
-	select SYS_SUPPORTS_MTU2
+	select SYS_SUPPORTS_SH_MTU2
 
 config CPU_SUBTYPE_SH7203
 	bool "Support SH7203 processor"
 	select CPU_SH2A
 	select CPU_HAS_FPU
-	select SYS_SUPPORTS_CMT
-	select SYS_SUPPORTS_MTU2
+	select SYS_SUPPORTS_SH_CMT
+	select SYS_SUPPORTS_SH_MTU2
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select PINCTRL
 
 config CPU_SUBTYPE_SH7206
 	bool "Support SH7206 processor"
 	select CPU_SH2A
-	select SYS_SUPPORTS_CMT
-	select SYS_SUPPORTS_MTU2
+	select SYS_SUPPORTS_SH_CMT
+	select SYS_SUPPORTS_SH_MTU2
 
 config CPU_SUBTYPE_SH7263
 	bool "Support SH7263 processor"
 	select CPU_SH2A
 	select CPU_HAS_FPU
-	select SYS_SUPPORTS_CMT
-	select SYS_SUPPORTS_MTU2
+	select SYS_SUPPORTS_SH_CMT
+	select SYS_SUPPORTS_SH_MTU2
 
 config CPU_SUBTYPE_SH7264
 	bool "Support SH7264 processor"
 	select CPU_SH2A
 	select CPU_HAS_FPU
-	select SYS_SUPPORTS_CMT
-	select SYS_SUPPORTS_MTU2
+	select SYS_SUPPORTS_SH_CMT
+	select SYS_SUPPORTS_SH_MTU2
 	select PINCTRL
 
 config CPU_SUBTYPE_SH7269
 	bool "Support SH7269 processor"
 	select CPU_SH2A
 	select CPU_HAS_FPU
-	select SYS_SUPPORTS_CMT
-	select SYS_SUPPORTS_MTU2
+	select SYS_SUPPORTS_SH_CMT
+	select SYS_SUPPORTS_SH_MTU2
 	select PINCTRL
 
 config CPU_SUBTYPE_MXG
 	bool "Support MX-G processor"
 	select CPU_SH2A
-	select SYS_SUPPORTS_MTU2
+	select SYS_SUPPORTS_SH_MTU2
 	help
 	  Select MX-G if running on an R8A03022BG part.
 
@@ -354,7 +345,7 @@ config CPU_SUBTYPE_SH7720
 	bool "Support SH7720 processor"
 	select CPU_SH3
 	select CPU_HAS_DSP
-	select SYS_SUPPORTS_CMT
+	select SYS_SUPPORTS_SH_CMT
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select USB_ARCH_HAS_OHCI
 	select USB_OHCI_SH if USB_OHCI_HCD
@@ -366,7 +357,7 @@ config CPU_SUBTYPE_SH7721
 	bool "Support SH7721 processor"
 	select CPU_SH3
 	select CPU_HAS_DSP
-	select SYS_SUPPORTS_CMT
+	select SYS_SUPPORTS_SH_CMT
 	select USB_ARCH_HAS_OHCI
 	select USB_OHCI_SH if USB_OHCI_HCD
 	help
@@ -422,7 +413,7 @@ config CPU_SUBTYPE_SH7723
 	select CPU_SHX2
 	select ARCH_SHMOBILE
 	select ARCH_SPARSEMEM_ENABLE
-	select SYS_SUPPORTS_CMT
+	select SYS_SUPPORTS_SH_CMT
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select PINCTRL
 	help
@@ -434,7 +425,7 @@ config CPU_SUBTYPE_SH7724
 	select CPU_SHX2
 	select ARCH_SHMOBILE
 	select ARCH_SPARSEMEM_ENABLE
-	select SYS_SUPPORTS_CMT
+	select SYS_SUPPORTS_SH_CMT
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select PINCTRL
 	help
@@ -514,7 +505,7 @@ config CPU_SUBTYPE_SH7343
 	bool "Support SH7343 processor"
 	select CPU_SH4AL_DSP
 	select ARCH_SHMOBILE
-	select SYS_SUPPORTS_CMT
+	select SYS_SUPPORTS_SH_CMT
 
 config CPU_SUBTYPE_SH7722
 	bool "Support SH7722 processor"
@@ -523,7 +514,7 @@ config CPU_SUBTYPE_SH7722
 	select ARCH_SHMOBILE
 	select ARCH_SPARSEMEM_ENABLE
 	select SYS_SUPPORTS_NUMA
-	select SYS_SUPPORTS_CMT
+	select SYS_SUPPORTS_SH_CMT
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select PINCTRL
 
@@ -534,7 +525,7 @@ config CPU_SUBTYPE_SH7366
 	select ARCH_SHMOBILE
 	select ARCH_SPARSEMEM_ENABLE
 	select SYS_SUPPORTS_NUMA
-	select SYS_SUPPORTS_CMT
+	select SYS_SUPPORTS_SH_CMT
 
 endchoice
 
@@ -567,27 +558,6 @@ source "arch/sh/boards/Kconfig"
 
 menu "Timer and clock configuration"
 
-config SH_TIMER_TMU
-	bool "TMU timer driver"
-	depends on SYS_SUPPORTS_TMU
-	default y
-	help
-	  This enables the build of the TMU timer driver.
-
-config SH_TIMER_CMT
-	bool "CMT timer driver"
-	depends on SYS_SUPPORTS_CMT
-	default y
-	help
-	  This enables build of the CMT timer driver.
-
-config SH_TIMER_MTU2
-	bool "MTU2 timer driver"
-	depends on SYS_SUPPORTS_MTU2
-	default y
-	help
-	  This enables build of the MTU2 timer driver.
-
 config SH_PCLK_FREQ
 	int "Peripheral clock frequency (in Hz)"
 	depends on SH_CLK_CPG_LEGACY
@@ -699,7 +699,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 		/* FIXME: add schedule_work_on() */
 		schedule_delayed_work_on(cpu, &work.work, 0);
 		wait_for_completion(&work.complete);
-		destroy_timer_on_stack(&work.work.timer);
+		destroy_delayed_work_on_stack(&work.work);
 		break;
 	case CPU_DEAD:
 		if (hdev) {
@@ -140,3 +140,51 @@ config VF_PIT_TIMER
 	bool
 	help
 	  Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
+
+config SYS_SUPPORTS_SH_CMT
+	bool
+
+config SYS_SUPPORTS_SH_MTU2
+	bool
+
+config SYS_SUPPORTS_SH_TMU
+	bool
+
+config SYS_SUPPORTS_EM_STI
+	bool
+
+config SH_TIMER_CMT
+	bool "Renesas CMT timer driver" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	default SYS_SUPPORTS_SH_CMT
+	help
+	  This enables build of a clocksource and clockevent driver for
+	  the Compare Match Timer (CMT) hardware available in 16/32/48-bit
+	  variants on a wide range of Mobile and Automotive SoCs from Renesas.
+
+config SH_TIMER_MTU2
+	bool "Renesas MTU2 timer driver" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	default SYS_SUPPORTS_SH_MTU2
+	help
+	  This enables build of a clockevent driver for the Multi-Function
+	  Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas.
+	  This hardware comes with 16-bit timer registers.
+
+config SH_TIMER_TMU
+	bool "Renesas TMU timer driver" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	default SYS_SUPPORTS_SH_TMU
+	help
+	  This enables build of a clocksource and clockevent driver for
+	  the 32-bit Timer Unit (TMU) hardware available on a wide range
+	  of SoCs from Renesas.
+
+config EM_TIMER_STI
+	bool "Renesas STI timer driver" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	default SYS_SUPPORTS_EM_STI
+	help
+	  This enables build of a clocksource and clockevent driver for
+	  the 48-bit System Timer (STI) hardware available on SoCs such
+	  as EMEV2 from former NEC Electronics.
@@ -21,6 +21,7 @@ obj-$(CONFIG_ARCH_MARCO)	+= timer-marco.o
 obj-$(CONFIG_ARCH_MOXART)	+= moxart_timer.o
 obj-$(CONFIG_ARCH_MXS)		+= mxs_timer.o
 obj-$(CONFIG_ARCH_PRIMA2)	+= timer-prima2.o
+obj-$(CONFIG_ARCH_U300)		+= timer-u300.o
 obj-$(CONFIG_SUN4I_TIMER)	+= sun4i_timer.o
 obj-$(CONFIG_SUN5I_HSTIMER)	+= timer-sun5i.o
 obj-$(CONFIG_ARCH_TEGRA)	+= tegra20_timer.o
@@ -37,3 +38,4 @@ obj-$(CONFIG_ARM_ARCH_TIMER)		+= arm_arch_timer.o
 obj-$(CONFIG_ARM_GLOBAL_TIMER)		+= arm_global_timer.o
 obj-$(CONFIG_CLKSRC_METAG_GENERIC)	+= metag_generic.o
 obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST)	+= dummy_timer.o
+obj-$(CONFIG_ARCH_KEYSTONE)		+= timer-keystone.o
@@ -277,6 +277,7 @@ static void __arch_timer_setup(unsigned type,
 			clk->set_next_event = arch_timer_set_next_event_phys;
 		}
 	} else {
+		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
 		clk->name = "arch_mem_timer";
 		clk->rating = 400;
 		clk->cpumask = cpu_all_mask;
@@ -16,6 +16,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/interrupt.h>
 #include <linux/clockchips.h>
 #include <linux/of_address.h>
@@ -52,6 +53,8 @@
 #define TTC_CNT_CNTRL_DISABLE_MASK	0x1
 
 #define TTC_CLK_CNTRL_CSRC_MASK		(1 << 5)	/* clock source */
+#define TTC_CLK_CNTRL_PSV_MASK		0x1e
+#define TTC_CLK_CNTRL_PSV_SHIFT		1
 
 /*
  * Setup the timers to use pre-scaling, using a fixed value for now that will
@@ -63,6 +66,8 @@
 #define CLK_CNTRL_PRESCALE_EN	1
 #define CNT_CNTRL_RESET		(1 << 4)
 
+#define MAX_F_ERR 50
+
 /**
  * struct ttc_timer - This definition defines local timer structure
  *
@@ -82,6 +87,8 @@ struct ttc_timer {
 		container_of(x, struct ttc_timer, clk_rate_change_nb)
 
 struct ttc_timer_clocksource {
+	u32			scale_clk_ctrl_reg_old;
+	u32			scale_clk_ctrl_reg_new;
 	struct ttc_timer	ttc;
 	struct clocksource	cs;
 };
@@ -229,32 +236,89 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
 			struct ttc_timer_clocksource, ttc);
 
 	switch (event) {
-	case POST_RATE_CHANGE:
-		/*
-		 * Do whatever is necessary to maintain a proper time base
-		 *
-		 * I cannot find a way to adjust the currently used clocksource
-		 * to the new frequency. __clocksource_updatefreq_hz() sounds
-		 * good, but does not work. Not sure what's that missing.
-		 *
-		 * This approach works, but triggers two clocksource switches.
-		 * The first after unregister to clocksource jiffies. And
-		 * another one after the register to the newly registered timer.
-		 *
-		 * Alternatively we could 'waste' another HW timer to ping pong
-		 * between clock sources. That would also use one register and
-		 * one unregister call, but only trigger one clocksource switch
-		 * for the cost of another HW timer used by the OS.
-		 */
-		clocksource_unregister(&ttccs->cs);
-		clocksource_register_hz(&ttccs->cs,
-				ndata->new_rate / PRESCALE);
-		/* fall through */
 	case PRE_RATE_CHANGE:
+	{
+		u32 psv;
+		unsigned long factor, rate_low, rate_high;
+
+		if (ndata->new_rate > ndata->old_rate) {
+			factor = DIV_ROUND_CLOSEST(ndata->new_rate,
+						   ndata->old_rate);
+			rate_low = ndata->old_rate;
+			rate_high = ndata->new_rate;
+		} else {
+			factor = DIV_ROUND_CLOSEST(ndata->old_rate,
+						   ndata->new_rate);
+			rate_low = ndata->new_rate;
+			rate_high = ndata->old_rate;
+		}
+
+		if (!is_power_of_2(factor))
+			return NOTIFY_BAD;
+
+		if (abs(rate_high - (factor * rate_low)) > MAX_F_ERR)
+			return NOTIFY_BAD;
+
+		factor = __ilog2_u32(factor);
+
+		/*
+		 * store timer clock ctrl register so we can restore it in case
+		 * of an abort.
+		 */
+		ttccs->scale_clk_ctrl_reg_old =
+			__raw_readl(ttccs->ttc.base_addr +
+			TTC_CLK_CNTRL_OFFSET);
+
+		psv = (ttccs->scale_clk_ctrl_reg_old &
+		       TTC_CLK_CNTRL_PSV_MASK) >>
+		       TTC_CLK_CNTRL_PSV_SHIFT;
+		if (ndata->new_rate < ndata->old_rate)
+			psv -= factor;
+		else
+			psv += factor;
+
+		/* prescaler within legal range? */
+		if (psv & ~(TTC_CLK_CNTRL_PSV_MASK >> TTC_CLK_CNTRL_PSV_SHIFT))
+			return NOTIFY_BAD;
+
+		ttccs->scale_clk_ctrl_reg_new = ttccs->scale_clk_ctrl_reg_old &
+			~TTC_CLK_CNTRL_PSV_MASK;
+		ttccs->scale_clk_ctrl_reg_new |= psv << TTC_CLK_CNTRL_PSV_SHIFT;
+
+
+		/* scale down: adjust divider in post-change notification */
+		if (ndata->new_rate < ndata->old_rate)
+			return NOTIFY_DONE;
+
+		/* scale up: adjust divider now - before frequency change */
+		__raw_writel(ttccs->scale_clk_ctrl_reg_new,
+			     ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+		break;
+	}
+	case POST_RATE_CHANGE:
+		/* scale up: pre-change notification did the adjustment */
+		if (ndata->new_rate > ndata->old_rate)
+			return NOTIFY_OK;
+
+		/* scale down: adjust divider now - after frequency change */
+		__raw_writel(ttccs->scale_clk_ctrl_reg_new,
+			     ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+		break;
+
 	case ABORT_RATE_CHANGE:
+		/* we have to undo the adjustment in case we scale up */
+		if (ndata->new_rate < ndata->old_rate)
+			return NOTIFY_OK;
+
+		/* restore original register value */
+		__raw_writel(ttccs->scale_clk_ctrl_reg_old,
+			     ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+		/* fall through */
 	default:
 		return NOTIFY_DONE;
 	}
 
+	return NOTIFY_DONE;
 }
 
 static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
@@ -321,25 +385,12 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
 
 	switch (event) {
 	case POST_RATE_CHANGE:
-	{
-		unsigned long flags;
-
-		/*
-		 * clockevents_update_freq should be called with IRQ disabled on
-		 * the CPU the timer provides events for. The timer we use is
-		 * common to both CPUs, not sure if we need to run on both
-		 * cores.
-		 */
-		local_irq_save(flags);
-		clockevents_update_freq(&ttcce->ce,
-				ndata->new_rate / PRESCALE);
-		local_irq_restore(flags);
-
 		/* update cached frequency */
 		ttc->freq = ndata->new_rate;
 
+		clockevents_update_freq(&ttcce->ce, ndata->new_rate / PRESCALE);
+
 		/* fall through */
-	}
 	case PRE_RATE_CHANGE:
 	case ABORT_RATE_CHANGE:
 	default:
@@ -410,7 +410,7 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
 	mevt = container_of(evt, struct mct_clock_event_device, evt);
 
 	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
-	sprintf(mevt->name, "mct_tick%d", cpu);
+	snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);
 
 	evt->name = mevt->name;
 	evt->cpumask = cpumask_of(cpu);
@@ -196,5 +196,5 @@ static void __init sun4i_timer_init(struct device_node *node)
 	clockevents_config_and_register(&sun4i_clockevent, rate,
 					TIMER_SYNC_TICKS, 0xffffffff);
 }
-CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-timer",
+CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
 		       sun4i_timer_init);
@@ -85,12 +85,6 @@ static u32 ticks_per_jiffy;
 
 static struct clock_event_device __percpu *armada_370_xp_evt;
 
-static void timer_ctrl_clrset(u32 clr, u32 set)
-{
-	writel((readl(timer_base + TIMER_CTRL_OFF) & ~clr) | set,
-		timer_base + TIMER_CTRL_OFF);
-}
-
 static void local_timer_ctrl_clrset(u32 clr, u32 set)
 {
 	writel((readl(local_base + TIMER_CTRL_OFF) & ~clr) | set,
@@ -245,7 +239,7 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
 		clr = TIMER0_25MHZ;
 		enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT);
 	}
-	timer_ctrl_clrset(clr, set);
+	atomic_io_modify(timer_base + TIMER_CTRL_OFF, clr | set, set);
 	local_timer_ctrl_clrset(clr, set);
 
 	/*
@@ -263,7 +257,9 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
 	writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
 	writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
 
-	timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);
+	atomic_io_modify(timer_base + TIMER_CTRL_OFF,
+			 TIMER0_RELOAD_EN | enable_mask,
+			 TIMER0_RELOAD_EN | enable_mask);
 
 	/*
 	 * Set scale and timer for sched_clock.
@@ -35,20 +35,6 @@
 #define ORION_ONESHOT_MAX	0xfffffffe
 
 static void __iomem *timer_base;
-static DEFINE_SPINLOCK(timer_ctrl_lock);
-
-/*
- * Thread-safe access to TIMER_CTRL register
- * (shared with watchdog timer)
- */
-void orion_timer_ctrl_clrset(u32 clr, u32 set)
-{
-	spin_lock(&timer_ctrl_lock);
-	writel((readl(timer_base + TIMER_CTRL) & ~clr) | set,
-		timer_base + TIMER_CTRL);
-	spin_unlock(&timer_ctrl_lock);
-}
-EXPORT_SYMBOL(orion_timer_ctrl_clrset);
 
 /*
  * Free-running clocksource handling.
@@ -68,7 +54,8 @@ static int orion_clkevt_next_event(unsigned long delta,
 {
 	/* setup and enable one-shot timer */
 	writel(delta, timer_base + TIMER1_VAL);
-	orion_timer_ctrl_clrset(TIMER1_RELOAD_EN, TIMER1_EN);
+	atomic_io_modify(timer_base + TIMER_CTRL,
+		TIMER1_RELOAD_EN | TIMER1_EN, TIMER1_EN);
 
 	return 0;
 }
@@ -80,10 +67,13 @@ static void orion_clkevt_mode(enum clock_event_mode mode,
 		/* setup and enable periodic timer at 1/HZ intervals */
 		writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD);
 		writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL);
-		orion_timer_ctrl_clrset(0, TIMER1_RELOAD_EN | TIMER1_EN);
+		atomic_io_modify(timer_base + TIMER_CTRL,
+			TIMER1_RELOAD_EN | TIMER1_EN,
+			TIMER1_RELOAD_EN | TIMER1_EN);
 	} else {
 		/* disable timer */
-		orion_timer_ctrl_clrset(TIMER1_RELOAD_EN | TIMER1_EN, 0);
+		atomic_io_modify(timer_base + TIMER_CTRL,
+			TIMER1_RELOAD_EN | TIMER1_EN, 0);
 	}
 }
 
@@ -131,7 +121,9 @@ static void __init orion_timer_init(struct device_node *np)
 	/* setup timer0 as free-running clocksource */
 	writel(~0, timer_base + TIMER0_VAL);
 	writel(~0, timer_base + TIMER0_RELOAD);
-	orion_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | TIMER0_EN);
+	atomic_io_modify(timer_base + TIMER_CTRL,
+		TIMER0_RELOAD_EN | TIMER0_EN,
+		TIMER0_RELOAD_EN | TIMER0_EN);
 	clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
 			      clk_get_rate(clk), 300, 32,
 			      clocksource_mmio_readl_down);
@@ -0,0 +1,241 @@
+/*
+ * Keystone broadcast clock-event
+ *
+ * Copyright 2013 Texas Instruments, Inc.
+ *
+ * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define TIMER_NAME			"timer-keystone"
+
+/* Timer register offsets */
+#define TIM12				0x10
+#define TIM34				0x14
+#define PRD12				0x18
+#define PRD34				0x1c
+#define TCR				0x20
+#define TGCR				0x24
+#define INTCTLSTAT			0x44
+
+/* Timer register bitfields */
+#define TCR_ENAMODE_MASK		0xC0
+#define TCR_ENAMODE_ONESHOT_MASK	0x40
+#define TCR_ENAMODE_PERIODIC_MASK	0x80
+
+#define TGCR_TIM_UNRESET_MASK		0x03
+#define INTCTLSTAT_ENINT_MASK		0x01
+
+/**
+ * struct keystone_timer: holds timer's data
+ * @base: timer memory base address
+ * @hz_period: cycles per HZ period
+ * @event_dev: event device based on timer
+ */
+static struct keystone_timer {
+	void __iomem *base;
+	unsigned long hz_period;
+	struct clock_event_device event_dev;
+} timer;
+
+static inline u32 keystone_timer_readl(unsigned long rg)
+{
+	return readl_relaxed(timer.base + rg);
+}
+
+static inline void keystone_timer_writel(u32 val, unsigned long rg)
+{
+	writel_relaxed(val, timer.base + rg);
+}
+
+/**
+ * keystone_timer_barrier: write memory barrier
+ * use explicit barrier to avoid using readl/writel non relaxed function
+ * variants, because in our case non relaxed variants hide the true places
+ * where barrier is needed.
+ */
+static inline void keystone_timer_barrier(void)
+{
+	__iowmb();
+}
+
+/**
+ * keystone_timer_config: configures timer to work in oneshot/periodic modes.
+ * @ mode: mode to configure
+ * @ period: cycles number to configure for
+ */
+static int keystone_timer_config(u64 period, enum clock_event_mode mode)
+{
+	u32 tcr;
+	u32 off;
+
+	tcr = keystone_timer_readl(TCR);
+	off = tcr & ~(TCR_ENAMODE_MASK);
+
+	/* set enable mode */
+	switch (mode) {
+	case CLOCK_EVT_MODE_ONESHOT:
+		tcr |= TCR_ENAMODE_ONESHOT_MASK;
+		break;
+	case CLOCK_EVT_MODE_PERIODIC:
+		tcr |= TCR_ENAMODE_PERIODIC_MASK;
+		break;
+	default:
+		return -1;
+	}
+
+	/* disable timer */
+	keystone_timer_writel(off, TCR);
+	/* here we have to be sure the timer has been disabled */
+	keystone_timer_barrier();
+
+	/* reset counter to zero, set new period */
+	keystone_timer_writel(0, TIM12);
+	keystone_timer_writel(0, TIM34);
+	keystone_timer_writel(period & 0xffffffff, PRD12);
+	keystone_timer_writel(period >> 32, PRD34);
+
+	/*
+	 * enable timer
+	 * here we have to be sure that CNTLO, CNTHI, PRDLO, PRDHI registers
+	 * have been written.
+	 */
+	keystone_timer_barrier();
+	keystone_timer_writel(tcr, TCR);
+	return 0;
+}
+
+static void keystone_timer_disable(void)
+{
+	u32 tcr;
+
+	tcr = keystone_timer_readl(TCR);
+
+	/* disable timer */
+	tcr &= ~(TCR_ENAMODE_MASK);
+	keystone_timer_writel(tcr, TCR);
+}
+
+static irqreturn_t keystone_timer_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = dev_id;
+
+	evt->event_handler(evt);
+	return IRQ_HANDLED;
+}
+
+static int keystone_set_next_event(unsigned long cycles,
+				   struct clock_event_device *evt)
+{
+	return keystone_timer_config(cycles, evt->mode);
+}
+
+static void keystone_set_mode(enum clock_event_mode mode,
+			      struct clock_event_device *evt)
+{
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		keystone_timer_config(timer.hz_period, CLOCK_EVT_MODE_PERIODIC);
+		break;
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+	case CLOCK_EVT_MODE_ONESHOT:
+		keystone_timer_disable();
+		break;
+	default:
+		break;
+	}
+}
+
+static void __init keystone_timer_init(struct device_node *np)
+{
+	struct clock_event_device *event_dev = &timer.event_dev;
+	unsigned long rate;
+	struct clk *clk;
+	int irq, error;
+
+	irq = irq_of_parse_and_map(np, 0);
+	if (irq == NO_IRQ) {
+		pr_err("%s: failed to map interrupts\n", __func__);
+		return;
+	}
+
+	timer.base = of_iomap(np, 0);
+	if (!timer.base) {
+		pr_err("%s: failed to map registers\n", __func__);
+		return;
+	}
+
+	clk = of_clk_get(np, 0);
+	if (IS_ERR(clk)) {
+		pr_err("%s: failed to get clock\n", __func__);
+		iounmap(timer.base);
+		return;
+	}
+
+	error = clk_prepare_enable(clk);
+	if (error) {
+		pr_err("%s: failed to enable clock\n", __func__);
+		goto err;
+	}
+
+	rate = clk_get_rate(clk);
+
+	/* disable, use internal clock source */
+	keystone_timer_writel(0, TCR);
+	/* here we have to be sure the timer has been disabled */
+	keystone_timer_barrier();
+
+	/* reset timer as 64-bit, no pre-scaler, plus features are disabled */
+	keystone_timer_writel(0, TGCR);
+
+	/* unreset timer */
+	keystone_timer_writel(TGCR_TIM_UNRESET_MASK, TGCR);
+
+	/* init counter to zero */
+	keystone_timer_writel(0, TIM12);
+	keystone_timer_writel(0, TIM34);
+
+	timer.hz_period = DIV_ROUND_UP(rate, HZ);
+
+	/* enable timer interrupts */
+	keystone_timer_writel(INTCTLSTAT_ENINT_MASK, INTCTLSTAT);
+
+	error = request_irq(irq, keystone_timer_interrupt, IRQF_TIMER,
+			    TIMER_NAME, event_dev);
+	if (error) {
+		pr_err("%s: failed to setup irq\n", __func__);
+		goto err;
+	}
+
+	/* setup clockevent */
+	event_dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+	event_dev->set_next_event = keystone_set_next_event;
+	event_dev->set_mode = keystone_set_mode;
+	event_dev->cpumask = cpu_all_mask;
+	event_dev->owner = THIS_MODULE;
+	event_dev->name = TIMER_NAME;
+	event_dev->irq = irq;
+
+	clockevents_config_and_register(event_dev, rate, 1, ULONG_MAX);
+
+	pr_info("keystone timer clock @%lu Hz\n", rate);
+	return;
+err:
+	clk_put(clk);
+	iounmap(timer.base);
+}
+
+CLOCKSOURCE_OF_DECLARE(keystone_timer, "ti,keystone-timer",
+			   keystone_timer_init);
@@ -1,8 +1,4 @@
 /*
- *
- * arch/arm/mach-u300/timer.c
- *
- *
  * Copyright (C) 2007-2009 ST-Ericsson AB
  * License terms: GNU General Public License (GPL) version 2
  * Timer COH 901 328, runs the OS timer interrupt.
@@ -140,12 +140,14 @@ int cpuidle_idle_call(void)
 		return 0;
 	}
 
-	trace_cpu_idle_rcuidle(next_state, dev->cpu);
-
 	broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP);
 
-	if (broadcast)
-		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+	if (broadcast &&
+	    clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu))
+		return -EBUSY;
+
+
+	trace_cpu_idle_rcuidle(next_state, dev->cpu);
 
 	if (cpuidle_state_is_coupled(dev, drv, next_state))
 		entered_state = cpuidle_enter_state_coupled(dev, drv,
@@ -153,11 +155,11 @@ int cpuidle_idle_call(void)
 	else
 		entered_state = cpuidle_enter_state(dev, drv, next_state);
 
+	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
+
 	if (broadcast)
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
 
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
-
 	/* give the governor an opportunity to reflect on the outcome */
 	if (cpuidle_curr_governor->reflect)
 		cpuidle_curr_governor->reflect(dev, entered_state);
@@ -317,6 +317,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
 	    (clockid != CLOCK_MONOTONIC &&
 	     clockid != CLOCK_REALTIME &&
 	     clockid != CLOCK_REALTIME_ALARM &&
+	     clockid != CLOCK_BOOTTIME &&
 	     clockid != CLOCK_BOOTTIME_ALARM))
 		return -EINVAL;
 
@@ -62,6 +62,11 @@ enum clock_event_mode {
 #define CLOCK_EVT_FEAT_DYNIRQ		0x000020
 #define CLOCK_EVT_FEAT_PERCPU		0x000040
 
+/*
+ * Clockevent device is based on a hrtimer for broadcast
+ */
+#define CLOCK_EVT_FEAT_HRTIMER		0x000080
+
 /**
  * struct clock_event_device - clock event device descriptor
  * @event_handler:	Assigned by the framework to be called by the low
@@ -83,6 +88,7 @@ enum clock_event_mode {
  * @name:		ptr to clock event name
 * @rating:		variable to rate clock event devices
 * @irq:		IRQ number (only for non CPU local devices)
+ * @bound_on:		Bound on CPU
 * @cpumask:		cpumask to indicate for which CPUs this device works
 * @list:		list head for the management code
 * @owner:		module reference
@@ -113,6 +119,7 @@ struct clock_event_device {
 	const char		*name;
 	int			rating;
 	int			irq;
+	int			bound_on;
 	const struct cpumask	*cpumask;
 	struct list_head	list;
 	struct module		*owner;
@@ -180,15 +187,17 @@ extern int tick_receive_broadcast(void);
 #endif
 
 #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+extern void tick_setup_hrtimer_broadcast(void);
 extern int tick_check_broadcast_expired(void);
 #else
 static inline int tick_check_broadcast_expired(void) { return 0; }
+static inline void tick_setup_hrtimer_broadcast(void) {};
 #endif
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
-extern void clockevents_notify(unsigned long reason, void *arg);
+extern int clockevents_notify(unsigned long reason, void *arg);
 #else
-static inline void clockevents_notify(unsigned long reason, void *arg) {}
+static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
 #endif
 
 #else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */
@@ -196,8 +205,9 @@ static inline void clockevents_notify(unsigned long reason, void *arg) {}
 static inline void clockevents_suspend(void) {}
 static inline void clockevents_resume(void) {}
 
-static inline void clockevents_notify(unsigned long reason, void *arg) {}
+static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
 static inline int tick_check_broadcast_expired(void) { return 0; }
+static inline void tick_setup_hrtimer_broadcast(void) {};
 
 #endif
 
@@ -96,12 +96,12 @@ enum hrtimer_restart {
  * @function:	timer expiry callback function
  * @base:	pointer to the timer base (per cpu and per clock)
  * @state:	state information (See bit values above)
+ * @start_pid:  timer statistics field to store the pid of the task which
+ *		started the timer
  * @start_site:	timer statistics field to store the site where the timer
  *		was started
  * @start_comm: timer statistics field to store the name of the process which
  *		started the timer
- * @start_pid: timer statistics field to store the pid of the task which
- *		started the timer
  *
  * The hrtimer structure must be initialized by hrtimer_init()
  */
@@ -294,10 +294,14 @@ extern int runqueue_is_locked(int cpu);
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 extern void nohz_balance_enter_idle(int cpu);
 extern void set_cpu_sd_state_idle(void);
-extern int get_nohz_timer_target(void);
+extern int get_nohz_timer_target(int pinned);
 #else
 static inline void nohz_balance_enter_idle(int cpu) { }
 static inline void set_cpu_sd_state_idle(void) { }
+static inline int get_nohz_timer_target(int pinned)
+{
+	return smp_processor_id();
+}
 #endif
 
 /*

@@ -180,6 +180,7 @@ struct execute_work {
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 extern void __init_work(struct work_struct *work, int onstack);
 extern void destroy_work_on_stack(struct work_struct *work);
+extern void destroy_delayed_work_on_stack(struct delayed_work *work);
 static inline unsigned int work_static(struct work_struct *work)
 {
 	return *work_data_bits(work) & WORK_STRUCT_STATIC;
@@ -187,6 +188,7 @@ static inline unsigned int work_static(struct work_struct *work)
 #else
 static inline void __init_work(struct work_struct *work, int onstack) { }
 static inline void destroy_work_on_stack(struct work_struct *work) { }
+static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
 static inline unsigned int work_static(struct work_struct *work) { return 0; }
 #endif
 
@@ -168,19 +168,6 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
 	}
 }
 
-
-/*
- * Get the preferred target CPU for NOHZ
- */
-static int hrtimer_get_target(int this_cpu, int pinned)
-{
-#ifdef CONFIG_NO_HZ_COMMON
-	if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu))
-		return get_nohz_timer_target();
-#endif
-	return this_cpu;
-}
-
 /*
  * With HIGHRES=y we do not migrate the timer when it is expiring
  * before the next event on the target cpu because we cannot reprogram
@@ -214,7 +201,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
 	struct hrtimer_clock_base *new_base;
 	struct hrtimer_cpu_base *new_cpu_base;
 	int this_cpu = smp_processor_id();
-	int cpu = hrtimer_get_target(this_cpu, pinned);
+	int cpu = get_nohz_timer_target(pinned);
 	int basenum = base->index;
 
 again:

@@ -555,12 +555,15 @@ void resched_cpu(int cpu)
  * selecting an idle cpu will add more delays to the timers than intended
  * (as that cpu's timer base may not be uptodate wrt jiffies etc).
  */
-int get_nohz_timer_target(void)
+int get_nohz_timer_target(int pinned)
 {
 	int cpu = smp_processor_id();
 	int i;
 	struct sched_domain *sd;
 
+	if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu))
+		return cpu;
+
 	rcu_read_lock();
 	for_each_domain(cpu, sd) {
 		for_each_cpu(i, sched_domain_span(sd)) {

@@ -124,7 +124,7 @@ config NO_HZ_FULL
 endchoice
 
 config NO_HZ_FULL_ALL
-	bool "Full dynticks system on all CPUs by default"
+	bool "Full dynticks system on all CPUs by default (except CPU 0)"
 	depends on NO_HZ_FULL
 	help
 	  If the user doesn't pass the nohz_full boot option to

@@ -3,7 +3,10 @@ obj-y += timeconv.o posix-clock.o alarmtimer.o
 
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o
 obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o
-obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += tick-broadcast.o
+ifeq ($(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST),y)
+ obj-y += tick-broadcast.o
+ obj-$(CONFIG_TICK_ONESHOT) += tick-broadcast-hrtimer.o
+endif
 obj-$(CONFIG_GENERIC_SCHED_CLOCK) += sched_clock.o
 obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o
 obj-$(CONFIG_TICK_ONESHOT) += tick-sched.o

@@ -439,6 +439,19 @@ void clockevents_config_and_register(struct clock_event_device *dev,
 }
 EXPORT_SYMBOL_GPL(clockevents_config_and_register);
 
+int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
+{
+	clockevents_config(dev, freq);
+
+	if (dev->mode == CLOCK_EVT_MODE_ONESHOT)
+		return clockevents_program_event(dev, dev->next_event, false);
+
+	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
+		dev->set_mode(CLOCK_EVT_MODE_PERIODIC, dev);
+
+	return 0;
+}
+
 /**
  * clockevents_update_freq - Update frequency and reprogram a clock event device.
  * @dev:	device to modify
@@ -446,17 +459,22 @@ EXPORT_SYMBOL_GPL(clockevents_config_and_register);
  *
  * Reconfigure and reprogram a clock event device in oneshot
  * mode. Must be called on the cpu for which the device delivers per
- * cpu timer events with interrupts disabled! Returns 0 on success,
- * -ETIME when the event is in the past.
+ * cpu timer events. If called for the broadcast device the core takes
+ * care of serialization.
+ *
+ * Returns 0 on success, -ETIME when the event is in the past.
  */
 int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
 {
-	clockevents_config(dev, freq);
+	unsigned long flags;
+	int ret;
 
-	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
-		return 0;
-
-	return clockevents_program_event(dev, dev->next_event, false);
+	local_irq_save(flags);
+	ret = tick_broadcast_update_freq(dev, freq);
+	if (ret == -ENODEV)
+		ret = __clockevents_update_freq(dev, freq);
+	local_irq_restore(flags);
+	return ret;
 }
 
 /*
@@ -524,12 +542,13 @@ void clockevents_resume(void)
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 /**
  * clockevents_notify - notification about relevant events
+ * Returns 0 on success, any other value on error
  */
-void clockevents_notify(unsigned long reason, void *arg)
+int clockevents_notify(unsigned long reason, void *arg)
 {
 	struct clock_event_device *dev, *tmp;
 	unsigned long flags;
-	int cpu;
+	int cpu, ret = 0;
 
 	raw_spin_lock_irqsave(&clockevents_lock, flags);
 
@@ -542,7 +561,7 @@ void clockevents_notify(unsigned long reason, void *arg)
 
 	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
 	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
-		tick_broadcast_oneshot_control(reason);
+		ret = tick_broadcast_oneshot_control(reason);
 		break;
 
 	case CLOCK_EVT_NOTIFY_CPU_DYING:
@@ -585,6 +604,7 @@ void clockevents_notify(unsigned long reason, void *arg)
 		break;
 	}
 	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(clockevents_notify);
 
|
@ -514,12 +514,13 @@ static void sync_cmos_clock(struct work_struct *work)
|
||||||
next.tv_sec++;
|
next.tv_sec++;
|
||||||
next.tv_nsec -= NSEC_PER_SEC;
|
next.tv_nsec -= NSEC_PER_SEC;
|
||||||
}
|
}
|
||||||
schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
|
queue_delayed_work(system_power_efficient_wq,
|
||||||
|
&sync_cmos_work, timespec_to_jiffies(&next));
|
||||||
}
|
}
|
||||||
|
|
||||||
void ntp_notify_cmos_timer(void)
|
void ntp_notify_cmos_timer(void)
|
||||||
{
|
{
|
||||||
schedule_delayed_work(&sync_cmos_work, 0);
|
queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
#else
|
#else
|
||||||
|
|
@@ -0,0 +1,106 @@
+/*
+ * linux/kernel/time/tick-broadcast-hrtimer.c
+ * This file emulates a local clock event device
+ * via a pseudo clock device.
+ */
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/profile.h>
+#include <linux/clockchips.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+
+#include "tick-internal.h"
+
+static struct hrtimer bctimer;
+
+static void bc_set_mode(enum clock_event_mode mode,
+			struct clock_event_device *bc)
+{
+	switch (mode) {
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		/*
+		 * Note, we cannot cancel the timer here as we might
+		 * run into the following live lock scenario:
+		 *
+		 * cpu 0			cpu1
+		 * lock(broadcast_lock);
+		 *				hrtimer_interrupt()
+		 *				bc_handler()
+		 *				   tick_handle_oneshot_broadcast();
+		 *				    lock(broadcast_lock);
+		 * hrtimer_cancel()
+		 *  wait_for_callback()
+		 */
+		hrtimer_try_to_cancel(&bctimer);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * This is called from the guts of the broadcast code when the cpu
+ * which is about to enter idle has the earliest broadcast timer event.
+ */
+static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
+{
+	/*
+	 * We try to cancel the timer first. If the callback is on
+	 * flight on some other cpu then we let it handle it. If we
+	 * were able to cancel the timer nothing can rearm it as we
+	 * own broadcast_lock.
+	 *
+	 * However we can also be called from the event handler of
+	 * ce_broadcast_hrtimer itself when it expires. We cannot
+	 * restart the timer because we are in the callback, but we
+	 * can set the expiry time and let the callback return
+	 * HRTIMER_RESTART.
+	 */
+	if (hrtimer_try_to_cancel(&bctimer) >= 0) {
+		hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
+		/* Bind the "device" to the cpu */
+		bc->bound_on = smp_processor_id();
+	} else if (bc->bound_on == smp_processor_id()) {
+		hrtimer_set_expires(&bctimer, expires);
+	}
+	return 0;
+}
+
+static struct clock_event_device ce_broadcast_hrtimer = {
+	.set_mode		= bc_set_mode,
+	.set_next_ktime		= bc_set_next,
+	.features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_KTIME |
+				  CLOCK_EVT_FEAT_HRTIMER,
+	.rating			= 0,
+	.bound_on		= -1,
+	.min_delta_ns		= 1,
+	.max_delta_ns		= KTIME_MAX,
+	.min_delta_ticks	= 1,
+	.max_delta_ticks	= ULONG_MAX,
+	.mult			= 1,
+	.shift			= 0,
+	.cpumask		= cpu_all_mask,
+};
+
+static enum hrtimer_restart bc_handler(struct hrtimer *t)
+{
+	ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
+
+	if (ce_broadcast_hrtimer.next_event.tv64 == KTIME_MAX)
+		return HRTIMER_NORESTART;
+
+	return HRTIMER_RESTART;
+}
+
+void tick_setup_hrtimer_broadcast(void)
+{
+	hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	bctimer.function = bc_handler;
+	clockevents_register_device(&ce_broadcast_hrtimer);
+}

@@ -120,6 +120,19 @@ int tick_is_broadcast_device(struct clock_event_device *dev)
 	return (dev && tick_broadcast_device.evtdev == dev);
 }
 
+int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
+{
+	int ret = -ENODEV;
+
+	if (tick_is_broadcast_device(dev)) {
+		raw_spin_lock(&tick_broadcast_lock);
+		ret = __clockevents_update_freq(dev, freq);
+		raw_spin_unlock(&tick_broadcast_lock);
+	}
+	return ret;
+}
+
 static void err_broadcast(const struct cpumask *mask)
 {
 	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
@@ -272,12 +285,8 @@ static void tick_do_broadcast(struct cpumask *mask)
  */
 static void tick_do_periodic_broadcast(void)
 {
-	raw_spin_lock(&tick_broadcast_lock);
-
 	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
 	tick_do_broadcast(tmpmask);
-
-	raw_spin_unlock(&tick_broadcast_lock);
 }
 
 /*
@@ -287,13 +296,15 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 {
 	ktime_t next;
 
+	raw_spin_lock(&tick_broadcast_lock);
+
 	tick_do_periodic_broadcast();
 
 	/*
 	 * The device is in periodic mode. No reprogramming necessary:
 	 */
 	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
-		return;
+		goto unlock;
 
 	/*
 	 * Setup the next period for devices, which do not have
@@ -306,9 +317,11 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 		next = ktime_add(next, tick_period);
 
 		if (!clockevents_program_event(dev, next, false))
-			return;
+			goto unlock;
 		tick_do_periodic_broadcast();
 	}
+unlock:
+	raw_spin_unlock(&tick_broadcast_lock);
 }
 
 /*
@@ -630,24 +643,61 @@ again:
 	raw_spin_unlock(&tick_broadcast_lock);
 }
 
+static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
+{
+	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
+		return 0;
+	if (bc->next_event.tv64 == KTIME_MAX)
+		return 0;
+	return bc->bound_on == cpu ? -EBUSY : 0;
+}
+
+static void broadcast_shutdown_local(struct clock_event_device *bc,
+				     struct clock_event_device *dev)
+{
+	/*
+	 * For hrtimer based broadcasting we cannot shutdown the cpu
+	 * local device if our own event is the first one to expire or
+	 * if we own the broadcast timer.
+	 */
+	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
+		if (broadcast_needs_cpu(bc, smp_processor_id()))
+			return;
+		if (dev->next_event.tv64 < bc->next_event.tv64)
+			return;
+	}
+	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+}
+
+static void broadcast_move_bc(int deadcpu)
+{
+	struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+	if (!bc || !broadcast_needs_cpu(bc, deadcpu))
+		return;
+	/* This moves the broadcast assignment to this cpu */
+	clockevents_program_event(bc, bc->next_event, 1);
+}
+
 /*
  * Powerstate information: The system enters/leaves a state, where
  * affected devices might stop
+ * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
  */
-void tick_broadcast_oneshot_control(unsigned long reason)
+int tick_broadcast_oneshot_control(unsigned long reason)
 {
 	struct clock_event_device *bc, *dev;
 	struct tick_device *td;
 	unsigned long flags;
 	ktime_t now;
-	int cpu;
+	int cpu, ret = 0;
 
 	/*
 	 * Periodic mode does not care about the enter/exit of power
 	 * states
 	 */
 	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
-		return;
+		return 0;
 
 	/*
 	 * We are called with preemtion disabled from the depth of the
@@ -658,7 +708,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 	dev = td->evtdev;
 
 	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
-		return;
+		return 0;
 
 	bc = tick_broadcast_device.evtdev;
 
@@ -666,7 +716,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
 			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
-			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+			broadcast_shutdown_local(bc, dev);
 			/*
 			 * We only reprogram the broadcast timer if we
 			 * did not mark ourself in the force mask and
@@ -679,6 +729,16 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 			    dev->next_event.tv64 < bc->next_event.tv64)
 				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
 		}
+		/*
+		 * If the current CPU owns the hrtimer broadcast
+		 * mechanism, it cannot go deep idle and we remove the
+		 * CPU from the broadcast mask. We don't have to go
+		 * through the EXIT path as the local timer is not
+		 * shutdown.
+		 */
+		ret = broadcast_needs_cpu(bc, cpu);
+		if (ret)
+			cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
 	} else {
 		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
 			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
@@ -746,6 +806,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
 	}
 out:
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+	return ret;
 }
 
 /*
@@ -852,6 +913,8 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
 	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
 	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
 
+	broadcast_move_bc(cpu);
+
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
@@ -98,18 +98,19 @@ static void tick_periodic(int cpu)
 void tick_handle_periodic(struct clock_event_device *dev)
 {
 	int cpu = smp_processor_id();
-	ktime_t next;
+	ktime_t next = dev->next_event;
 
 	tick_periodic(cpu);
 
 	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
 		return;
-	/*
-	 * Setup the next period for devices, which do not have
-	 * periodic mode:
-	 */
-	next = ktime_add(dev->next_event, tick_period);
 	for (;;) {
+		/*
+		 * Setup the next period for devices, which do not have
+		 * periodic mode:
+		 */
+		next = ktime_add(next, tick_period);
+
 		if (!clockevents_program_event(dev, next, false))
 			return;
 		/*
@@ -118,12 +119,11 @@ void tick_handle_periodic(struct clock_event_device *dev)
 		 * to be sure we're using a real hardware clocksource.
 		 * Otherwise we could get trapped in an infinite
 		 * loop, as the tick_periodic() increments jiffies,
-		 * when then will increment time, posibly causing
+		 * which then will increment time, possibly causing
		 * the loop to trigger again and again.
 		 */
 		if (timekeeping_valid_for_hres())
 			tick_periodic(cpu);
-		next = ktime_add(next, tick_period);
 	}
 }
 
@@ -46,7 +46,7 @@ extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *));
 extern void tick_resume_oneshot(void);
 # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
-extern void tick_broadcast_oneshot_control(unsigned long reason);
+extern int tick_broadcast_oneshot_control(unsigned long reason);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
@@ -58,7 +58,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
 	BUG();
 }
-static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
+static inline int tick_broadcast_oneshot_control(unsigned long reason) { return 0; }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
@@ -87,7 +87,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
 	BUG();
 }
-static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
+static inline int tick_broadcast_oneshot_control(unsigned long reason) { return 0; }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 {
@@ -111,6 +111,7 @@ extern int tick_resume_broadcast(void);
 extern void tick_broadcast_init(void);
 extern void
 tick_set_periodic_handler(struct clock_event_device *dev, int broadcast);
+int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq);
 
 #else /* !BROADCAST */
 
@@ -133,6 +134,8 @@ static inline void tick_shutdown_broadcast(unsigned int *cpup) { }
 static inline void tick_suspend_broadcast(void) { }
 static inline int tick_resume_broadcast(void) { return 0; }
 static inline void tick_broadcast_init(void) { }
+static inline int tick_broadcast_update_freq(struct clock_event_device *dev,
+					     u32 freq) { return -ENODEV; }
 
 /*
  * Set the periodic handler in non broadcast mode
@@ -152,6 +155,8 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
 	return !(dev->features & CLOCK_EVT_FEAT_DUMMY);
 }
 
+int __clockevents_update_freq(struct clock_event_device *dev, u32 freq);
+
 #endif
 
 extern void do_timer(unsigned long ticks);

@@ -21,6 +21,8 @@
 #include <linux/seq_file.h>
 #include <linux/time.h>
 
+#include "timekeeping_internal.h"
+
 static unsigned int sleep_time_bin[32] = {0};
 
 static int tk_debug_show_sleep_time(struct seq_file *s, void *data)

@@ -81,6 +81,7 @@ struct tvec_base {
 	unsigned long timer_jiffies;
 	unsigned long next_timer;
 	unsigned long active_timers;
+	unsigned long all_timers;
 	struct tvec_root tv1;
 	struct tvec tv2;
 	struct tvec tv3;
@@ -337,6 +338,20 @@ void set_timer_slack(struct timer_list *timer, int slack_hz)
 }
 EXPORT_SYMBOL_GPL(set_timer_slack);
 
+/*
+ * If the list is empty, catch up ->timer_jiffies to the current time.
+ * The caller must hold the tvec_base lock.  Returns true if the list
+ * was empty and therefore ->timer_jiffies was updated.
+ */
+static bool catchup_timer_jiffies(struct tvec_base *base)
+{
+	if (!base->all_timers) {
+		base->timer_jiffies = jiffies;
+		return true;
+	}
+	return false;
+}
+
 static void
 __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 {
@@ -383,15 +398,17 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 
 static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 {
+	(void)catchup_timer_jiffies(base);
 	__internal_add_timer(base, timer);
 	/*
 	 * Update base->active_timers and base->next_timer
 	 */
 	if (!tbase_get_deferrable(timer->base)) {
-		if (time_before(timer->expires, base->next_timer))
+		if (!base->active_timers++ ||
+		    time_before(timer->expires, base->next_timer))
 			base->next_timer = timer->expires;
-		base->active_timers++;
 	}
+	base->all_timers++;
 }
 
 #ifdef CONFIG_TIMER_STATS
@@ -671,6 +688,8 @@ detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
 	detach_timer(timer, true);
 	if (!tbase_get_deferrable(timer->base))
 		base->active_timers--;
+	base->all_timers--;
+	(void)catchup_timer_jiffies(base);
 }
 
 static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
@@ -685,6 +704,8 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
 		if (timer->expires == base->next_timer)
 			base->next_timer = base->timer_jiffies;
 	}
+	base->all_timers--;
+	(void)catchup_timer_jiffies(base);
 	return 1;
 }
 
@@ -739,12 +760,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
 
 	debug_activate(timer, expires);
 
-	cpu = smp_processor_id();
-
-#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
-	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
-		cpu = get_nohz_timer_target();
-#endif
+	cpu = get_nohz_timer_target(pinned);
 	new_base = per_cpu(tvec_bases, cpu);
 
 	if (base != new_base) {
@@ -939,8 +955,15 @@ void add_timer_on(struct timer_list *timer, int cpu)
 	 * with the timer by holding the timer base lock. This also
 	 * makes sure that a CPU on the way to stop its tick can not
 	 * evaluate the timer wheel.
+	 *
+	 * Spare the IPI for deferrable timers on idle targets though.
+	 * The next busy ticks will take care of it. Except full dynticks
+	 * require special care against races with idle_cpu(), lets deal
+	 * with that later.
 	 */
-	wake_up_nohz_cpu(cpu);
+	if (!tbase_get_deferrable(timer->base) || tick_nohz_full_cpu(cpu))
+		wake_up_nohz_cpu(cpu);
+
 	spin_unlock_irqrestore(&base->lock, flags);
 }
 EXPORT_SYMBOL_GPL(add_timer_on);
@@ -1146,6 +1169,10 @@ static inline void __run_timers(struct tvec_base *base)
 	struct timer_list *timer;
 
 	spin_lock_irq(&base->lock);
+	if (catchup_timer_jiffies(base)) {
+		spin_unlock_irq(&base->lock);
+		return;
+	}
 	while (time_after_eq(jiffies, base->timer_jiffies)) {
 		struct list_head work_list;
 		struct list_head *head = &work_list;
@@ -1160,7 +1187,7 @@ static inline void __run_timers(struct tvec_base *base)
 				!cascade(base, &base->tv4, INDEX(2)))
 			cascade(base, &base->tv5, INDEX(3));
 		++base->timer_jiffies;
-		list_replace_init(base->tv1.vec + index, &work_list);
+		list_replace_init(base->tv1.vec + index, head);
 		while (!list_empty(head)) {
 			void (*fn)(unsigned long);
 			unsigned long data;
@@ -1523,9 +1550,8 @@ static int init_timers_cpu(int cpu)
 		if (!base)
 			return -ENOMEM;
 
-		/* Make sure that tvec_base is 2 byte aligned */
-		if (tbase_get_deferrable(base)) {
-			WARN_ON(1);
+		/* Make sure tvec_base has TIMER_FLAG_MASK bits free */
+		if (WARN_ON(base != tbase_get_base(base))) {
 			kfree(base);
 			return -ENOMEM;
 		}
@@ -1559,6 +1585,7 @@ static int init_timers_cpu(int cpu)
 	base->timer_jiffies = jiffies;
 	base->next_timer = base->timer_jiffies;
 	base->active_timers = 0;
+	base->all_timers = 0;
 	return 0;
 }
 
@@ -1648,9 +1675,9 @@ void __init init_timers(void)
 
 	err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
 			       (void *)(long)smp_processor_id());
-	init_timer_stats();
 
 	BUG_ON(err != NOTIFY_OK);
+
+	init_timer_stats();
 	register_cpu_notifier(&timers_nb);
 	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
 }

@@ -516,6 +516,13 @@ void destroy_work_on_stack(struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
 
+void destroy_delayed_work_on_stack(struct delayed_work *work)
+{
+	destroy_timer_on_stack(&work->timer);
+	debug_object_free(&work->work, &work_debug_descr);
+}
+EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
+
 #else
 static inline void debug_work_activate(struct work_struct *work) { }
 static inline void debug_work_deactivate(struct work_struct *work) { }