Merge tag 'timers-v5.16-rc1' into timers/core

Pull timers update for v5.16 from Daniel Lezcano:

- Fix redefined macro in the arc timer ()

- Big cleanup of the ARM arch timer clocksource to set the scene for
  ARMv8.6 and to support higher frequencies with longer roll-over
  periods (Marc Zyngier)

- Make the Exynos MCT and Samsung PWM timers arch dependent (Krzysztof
  Kozlowski)

- Select the TIMER_OF option for the TI DM timer (Kees Cook)
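
A note on the roll-over point in the arch timer cleanup above: the
series sizes the clocksource mask from the counter frequency so that
the counter is guaranteed not to wrap for at least 40 years, clamped
to the architectural 56..64 bit range (arch_counter_get_width() in the
arm_arch_timer diff below). The stand-alone sketch below mirrors that
arithmetic; the helper name and the 1 GHz sample frequency are
illustrative only and not taken from the series.

#include <stdint.h>
#include <stdio.h>

/* Same 40-year bound the driver uses (MIN_ROLLOVER_SECS) */
#define MIN_ROLLOVER_SECS (40ULL * 365 * 24 * 3600)

/* Smallest width that still gives at least 40 years before roll-over */
static int counter_width(uint64_t rate_hz)
{
        uint64_t min_cycles = MIN_ROLLOVER_SECS * rate_hz;
        /* ilog2(min_cycles - 1) + 1, as in arch_counter_get_width() */
        int width = 64 - __builtin_clzll(min_cycles - 1);

        if (width < 56)         /* architectural minimum counter width */
                width = 56;
        if (width > 64)
                width = 64;
        return width;
}

int main(void)
{
        /* ~1.26e18 cycles in 40 years at 1 GHz -> 61 bits are needed */
        printf("%d\n", counter_width(1000000000ULL));
        return 0;
}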

Link: https://lore.kernel.org/r/65693aaf-ab94-c9bb-a97b-a2bb77033a54@linaro.org
Signed-off-by: Borislav Petkov <bp@suse.de>
Borislav Petkov 2021-10-24 17:14:23 +02:00
commit a8da61cee9
7 changed files with 201 additions and 146 deletions


@@ -7,6 +7,7 @@
 #include <asm/hwcap.h>
 #include <linux/clocksource.h>
 #include <linux/init.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/types.h>

 #include <clocksource/arm_arch_timer.h>
@@ -24,29 +25,35 @@ int arch_timer_arch_init(void);
  * the code. At least it does so with a recent GCC (4.6.3).
  */
 static __always_inline
-void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
+void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u64 val)
 {
         if (access == ARCH_TIMER_PHYS_ACCESS) {
                 switch (reg) {
                 case ARCH_TIMER_REG_CTRL:
-                        asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
+                        asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" ((u32)val));
+                        isb();
                         break;
-                case ARCH_TIMER_REG_TVAL:
-                        asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
+                case ARCH_TIMER_REG_CVAL:
+                        asm volatile("mcrr p15, 2, %Q0, %R0, c14" : : "r" (val));
                         break;
+                default:
+                        BUILD_BUG();
                 }
         } else if (access == ARCH_TIMER_VIRT_ACCESS) {
                 switch (reg) {
                 case ARCH_TIMER_REG_CTRL:
-                        asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
+                        asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" ((u32)val));
+                        isb();
                         break;
-                case ARCH_TIMER_REG_TVAL:
-                        asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
+                case ARCH_TIMER_REG_CVAL:
+                        asm volatile("mcrr p15, 3, %Q0, %R0, c14" : : "r" (val));
                         break;
+                default:
+                        BUILD_BUG();
                 }
+        } else {
+                BUILD_BUG();
         }
-
-        isb();
 }

 static __always_inline
@@ -59,19 +66,19 @@ u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
                 case ARCH_TIMER_REG_CTRL:
                         asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
                         break;
-                case ARCH_TIMER_REG_TVAL:
-                        asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
-                        break;
+                default:
+                        BUILD_BUG();
                 }
         } else if (access == ARCH_TIMER_VIRT_ACCESS) {
                 switch (reg) {
                 case ARCH_TIMER_REG_CTRL:
                         asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
                         break;
-                case ARCH_TIMER_REG_TVAL:
-                        asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
-                        break;
+                default:
+                        BUILD_BUG();
                 }
+        } else {
+                BUILD_BUG();
         }

         return val;


@@ -32,7 +32,7 @@
 ({                                                              \
         const struct arch_timer_erratum_workaround *__wa;       \
         __wa = __this_cpu_read(timer_unstable_counter_workaround); \
-        (__wa && __wa->h) ? __wa->h : arch_timer_##h;           \
+        (__wa && __wa->h) ? ({ isb(); __wa->h;}) : arch_timer_##h; \
 })

 #else
@@ -52,8 +52,6 @@ struct arch_timer_erratum_workaround {
         enum arch_timer_erratum_match_type match_type;
         const void *id;
         const char *desc;
-        u32 (*read_cntp_tval_el0)(void);
-        u32 (*read_cntv_tval_el0)(void);
         u64 (*read_cntpct_el0)(void);
         u64 (*read_cntvct_el0)(void);
         int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
@@ -64,24 +62,15 @@ struct arch_timer_erratum_workaround {
 DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
                 timer_unstable_counter_workaround);

-/* inline sysreg accessors that make erratum_handler() work */
-static inline notrace u32 arch_timer_read_cntp_tval_el0(void)
-{
-        return read_sysreg(cntp_tval_el0);
-}
-
-static inline notrace u32 arch_timer_read_cntv_tval_el0(void)
-{
-        return read_sysreg(cntv_tval_el0);
-}
-
 static inline notrace u64 arch_timer_read_cntpct_el0(void)
 {
+        isb();
         return read_sysreg(cntpct_el0);
 }

 static inline notrace u64 arch_timer_read_cntvct_el0(void)
 {
+        isb();
         return read_sysreg(cntvct_el0);
 }
@@ -102,51 +91,58 @@ static inline notrace u64 arch_timer_read_cntvct_el0(void)
  * the code.
  */
 static __always_inline
-void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
+void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u64 val)
 {
         if (access == ARCH_TIMER_PHYS_ACCESS) {
                 switch (reg) {
                 case ARCH_TIMER_REG_CTRL:
                         write_sysreg(val, cntp_ctl_el0);
+                        isb();
                         break;
-                case ARCH_TIMER_REG_TVAL:
-                        write_sysreg(val, cntp_tval_el0);
+                case ARCH_TIMER_REG_CVAL:
+                        write_sysreg(val, cntp_cval_el0);
                         break;
+                default:
+                        BUILD_BUG();
                 }
         } else if (access == ARCH_TIMER_VIRT_ACCESS) {
                 switch (reg) {
                 case ARCH_TIMER_REG_CTRL:
                         write_sysreg(val, cntv_ctl_el0);
+                        isb();
                         break;
-                case ARCH_TIMER_REG_TVAL:
-                        write_sysreg(val, cntv_tval_el0);
+                case ARCH_TIMER_REG_CVAL:
+                        write_sysreg(val, cntv_cval_el0);
                         break;
+                default:
+                        BUILD_BUG();
                 }
+        } else {
+                BUILD_BUG();
         }
-
-        isb();
 }

 static __always_inline
-u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
+u64 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
 {
         if (access == ARCH_TIMER_PHYS_ACCESS) {
                 switch (reg) {
                 case ARCH_TIMER_REG_CTRL:
                         return read_sysreg(cntp_ctl_el0);
-                case ARCH_TIMER_REG_TVAL:
-                        return arch_timer_reg_read_stable(cntp_tval_el0);
+                default:
+                        BUILD_BUG();
                 }
         } else if (access == ARCH_TIMER_VIRT_ACCESS) {
                 switch (reg) {
                 case ARCH_TIMER_REG_CTRL:
                         return read_sysreg(cntv_ctl_el0);
-                case ARCH_TIMER_REG_TVAL:
-                        return arch_timer_reg_read_stable(cntv_tval_el0);
+                default:
+                        BUILD_BUG();
                 }
         }

-        BUG();
+        BUILD_BUG();
+        unreachable();
 }

 static inline u32 arch_timer_get_cntfrq(void)
@@ -169,7 +165,6 @@ static __always_inline u64 __arch_counter_get_cntpct_stable(void)
 {
         u64 cnt;

-        isb();
         cnt = arch_timer_reg_read_stable(cntpct_el0);
         arch_counter_enforce_ordering(cnt);
         return cnt;
@@ -189,7 +184,6 @@ static __always_inline u64 __arch_counter_get_cntvct_stable(void)
 {
         u64 cnt;

-        isb();
         cnt = arch_timer_reg_read_stable(cntvct_el0);
         arch_counter_enforce_ordering(cnt);
         return cnt;


@@ -24,6 +24,7 @@ config I8253_LOCK

 config OMAP_DM_TIMER
         bool
+        select TIMER_OF

 config CLKBLD_I8253
         def_bool y if CLKSRC_I8253 || CLKEVT_I8253 || I8253_LOCK
@@ -417,12 +418,14 @@ config ATMEL_TCB_CLKSRC
 config CLKSRC_EXYNOS_MCT
         bool "Exynos multi core timer driver" if COMPILE_TEST
         depends on ARM || ARM64
+        depends on ARCH_EXYNOS || COMPILE_TEST
         help
           Support for Multi Core Timer controller on Exynos SoCs.

 config CLKSRC_SAMSUNG_PWM
         bool "PWM timer driver for Samsung S3C, S5P" if COMPILE_TEST
         depends on HAS_IOMEM
+        depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210 || COMPILE_TEST
         help
           This is a new clocksource driver for the PWM timer found in
           Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver


@@ -225,7 +225,7 @@ static int __init arc_cs_setup_timer1(struct device_node *node)
         write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMERN_MAX);
         write_aux_reg(ARC_REG_TIMER1_CNT, 0);
-        write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
+        write_aux_reg(ARC_REG_TIMER1_CTRL, ARC_TIMER_CTRL_NH);

         sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq);
@@ -245,7 +245,7 @@ static void arc_timer_event_setup(unsigned int cycles)
         write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
         write_aux_reg(ARC_REG_TIMER0_CNT, 0);   /* start from 0 */

-        write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
+        write_aux_reg(ARC_REG_TIMER0_CTRL, ARC_TIMER_CTRL_IE | ARC_TIMER_CTRL_NH);
 }
@@ -294,7 +294,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
          * explicitly clears IP bit
          * 2. Re-arm interrupt if periodic by writing to IE bit [0]
          */
-        write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
+        write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | ARC_TIMER_CTRL_NH);

         evt->event_handler(evt);


@@ -44,23 +44,29 @@
 #define CNTACR_RWVT     BIT(4)
 #define CNTACR_RWPT     BIT(5)

-#define CNTVCT_LO       0x08
-#define CNTVCT_HI       0x0c
+#define CNTVCT_LO       0x00
+#define CNTPCT_LO       0x08
 #define CNTFRQ          0x10
-#define CNTP_TVAL       0x28
+#define CNTP_CVAL_LO    0x20
 #define CNTP_CTL        0x2c
-#define CNTV_TVAL       0x38
+#define CNTV_CVAL_LO    0x30
 #define CNTV_CTL        0x3c

-static unsigned arch_timers_present __initdata;
+/*
+ * The minimum amount of time a generic counter is guaranteed to not roll over
+ * (40 years)
+ */
+#define MIN_ROLLOVER_SECS       (40ULL * 365 * 24 * 3600)

-static void __iomem *arch_counter_base __ro_after_init;
+static unsigned arch_timers_present __initdata;

 struct arch_timer {
         void __iomem *base;
         struct clock_event_device evt;
 };

+static struct arch_timer *arch_timer_mem __ro_after_init;
+
 #define to_arch_timer(e) container_of(e, struct arch_timer, evt)

 static u32 arch_timer_rate __ro_after_init;
@@ -95,33 +101,58 @@ static int __init early_evtstrm_cfg(char *buf)
 }
 early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);

+/*
+ * Makes an educated guess at a valid counter width based on the Generic Timer
+ * specification. Of note:
+ * 1) the system counter is at least 56 bits wide
+ * 2) a roll-over time of not less than 40 years
+ *
+ * See 'ARM DDI 0487G.a D11.1.2 ("The system counter")' for more details.
+ */
+static int arch_counter_get_width(void)
+{
+        u64 min_cycles = MIN_ROLLOVER_SECS * arch_timer_rate;
+
+        /* guarantee the returned width is within the valid range */
+        return clamp_val(ilog2(min_cycles - 1) + 1, 56, 64);
+}
+
 /*
  * Architected system timer support.
  */
 static __always_inline
-void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
+void arch_timer_reg_write(int access, enum arch_timer_reg reg, u64 val,
                           struct clock_event_device *clk)
 {
         if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
                 struct arch_timer *timer = to_arch_timer(clk);
                 switch (reg) {
                 case ARCH_TIMER_REG_CTRL:
-                        writel_relaxed(val, timer->base + CNTP_CTL);
+                        writel_relaxed((u32)val, timer->base + CNTP_CTL);
                         break;
-                case ARCH_TIMER_REG_TVAL:
-                        writel_relaxed(val, timer->base + CNTP_TVAL);
+                case ARCH_TIMER_REG_CVAL:
+                        /*
+                         * Not guaranteed to be atomic, so the timer
+                         * must be disabled at this point.
+                         */
+                        writeq_relaxed(val, timer->base + CNTP_CVAL_LO);
                         break;
+                default:
+                        BUILD_BUG();
                 }
         } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
                 struct arch_timer *timer = to_arch_timer(clk);
                 switch (reg) {
                 case ARCH_TIMER_REG_CTRL:
-                        writel_relaxed(val, timer->base + CNTV_CTL);
+                        writel_relaxed((u32)val, timer->base + CNTV_CTL);
                         break;
-                case ARCH_TIMER_REG_TVAL:
-                        writel_relaxed(val, timer->base + CNTV_TVAL);
+                case ARCH_TIMER_REG_CVAL:
+                        /* Same restriction as above */
+                        writeq_relaxed(val, timer->base + CNTV_CVAL_LO);
                         break;
+                default:
+                        BUILD_BUG();
                 }
         } else {
                 arch_timer_reg_write_cp15(access, reg, val);
@@ -140,9 +171,8 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
                 case ARCH_TIMER_REG_CTRL:
                         val = readl_relaxed(timer->base + CNTP_CTL);
                         break;
-                case ARCH_TIMER_REG_TVAL:
-                        val = readl_relaxed(timer->base + CNTP_TVAL);
-                        break;
+                default:
+                        BUILD_BUG();
                 }
         } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
                 struct arch_timer *timer = to_arch_timer(clk);
@@ -150,9 +180,8 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
                 case ARCH_TIMER_REG_CTRL:
                         val = readl_relaxed(timer->base + CNTV_CTL);
                         break;
-                case ARCH_TIMER_REG_TVAL:
-                        val = readl_relaxed(timer->base + CNTV_TVAL);
-                        break;
+                default:
+                        BUILD_BUG();
                 }
         } else {
                 val = arch_timer_reg_read_cp15(access, reg);
@@ -205,13 +234,11 @@ static struct clocksource clocksource_counter = {
         .id     = CSID_ARM_ARCH_COUNTER,
         .rating = 400,
         .read   = arch_counter_read,
-        .mask   = CLOCKSOURCE_MASK(56),
         .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };

 static struct cyclecounter cyclecounter __ro_after_init = {
         .read   = arch_counter_read_cc,
-        .mask   = CLOCKSOURCE_MASK(56),
 };

 struct ate_acpi_oem_info {
@@ -239,16 +266,6 @@ struct ate_acpi_oem_info {
         _new;                                                   \
 })

-static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
-{
-        return __fsl_a008585_read_reg(cntp_tval_el0);
-}
-
-static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
-{
-        return __fsl_a008585_read_reg(cntv_tval_el0);
-}
-
 static u64 notrace fsl_a008585_read_cntpct_el0(void)
 {
         return __fsl_a008585_read_reg(cntpct_el0);
@@ -285,16 +302,6 @@ static u64 notrace fsl_a008585_read_cntvct_el0(void)
         _new;                                                   \
 })

-static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
-{
-        return __hisi_161010101_read_reg(cntp_tval_el0);
-}
-
-static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
-{
-        return __hisi_161010101_read_reg(cntv_tval_el0);
-}
-
 static u64 notrace hisi_161010101_read_cntpct_el0(void)
 {
         return __hisi_161010101_read_reg(cntpct_el0);
@@ -379,16 +386,6 @@ static u64 notrace sun50i_a64_read_cntvct_el0(void)
 {
         return __sun50i_a64_read_reg(cntvct_el0);
 }
-
-static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
-{
-        return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
-}
-
-static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
-{
-        return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
-}
 #endif

 #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
@@ -397,7 +394,7 @@ EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

 static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);

-static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
+static void erratum_set_next_event_generic(const int access, unsigned long evt,
                                                 struct clock_event_device *clk)
 {
         unsigned long ctrl;
@@ -418,17 +415,17 @@ static void erratum_set_next_event_tval_generic(const int access, unsigned long
         arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
 }

-static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
+static __maybe_unused int erratum_set_next_event_virt(unsigned long evt,
                                             struct clock_event_device *clk)
 {
-        erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
+        erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
         return 0;
 }

-static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
+static __maybe_unused int erratum_set_next_event_phys(unsigned long evt,
                                             struct clock_event_device *clk)
 {
-        erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
+        erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
         return 0;
 }
@@ -438,12 +435,10 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
                 .match_type = ate_match_dt,
                 .id = "fsl,erratum-a008585",
                 .desc = "Freescale erratum a005858",
-                .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
-                .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
                 .read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
                 .read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
-                .set_next_event_phys = erratum_set_next_event_tval_phys,
-                .set_next_event_virt = erratum_set_next_event_tval_virt,
+                .set_next_event_phys = erratum_set_next_event_phys,
+                .set_next_event_virt = erratum_set_next_event_virt,
         },
 #endif
 #ifdef CONFIG_HISILICON_ERRATUM_161010101
@@ -451,23 +446,19 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
                 .match_type = ate_match_dt,
                 .id = "hisilicon,erratum-161010101",
                 .desc = "HiSilicon erratum 161010101",
-                .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
-                .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
                 .read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
                 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
-                .set_next_event_phys = erratum_set_next_event_tval_phys,
-                .set_next_event_virt = erratum_set_next_event_tval_virt,
+                .set_next_event_phys = erratum_set_next_event_phys,
+                .set_next_event_virt = erratum_set_next_event_virt,
         },
         {
                 .match_type = ate_match_acpi_oem_info,
                 .id = hisi_161010101_oem_info,
                 .desc = "HiSilicon erratum 161010101",
-                .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
-                .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
                 .read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
                 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
-                .set_next_event_phys = erratum_set_next_event_tval_phys,
-                .set_next_event_virt = erratum_set_next_event_tval_virt,
+                .set_next_event_phys = erratum_set_next_event_phys,
+                .set_next_event_virt = erratum_set_next_event_virt,
         },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_858921
@@ -484,12 +475,10 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
                 .match_type = ate_match_dt,
                 .id = "allwinner,erratum-unknown1",
                 .desc = "Allwinner erratum UNKNOWN1",
-                .read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
-                .read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
                 .read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
                 .read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
-                .set_next_event_phys = erratum_set_next_event_tval_phys,
-                .set_next_event_virt = erratum_set_next_event_tval_virt,
+                .set_next_event_phys = erratum_set_next_event_phys,
+                .set_next_event_virt = erratum_set_next_event_virt,
         },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_1418040
@@ -727,10 +716,18 @@ static __always_inline void set_next_event(const int access, unsigned long evt,
                                            struct clock_event_device *clk)
 {
         unsigned long ctrl;
+        u64 cnt;
+
         ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
         ctrl |= ARCH_TIMER_CTRL_ENABLE;
         ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
-        arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
+
+        if (access == ARCH_TIMER_PHYS_ACCESS)
+                cnt = __arch_counter_get_cntpct();
+        else
+                cnt = __arch_counter_get_cntvct();
+
+        arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
         arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
 }
@@ -748,23 +745,79 @@ static int arch_timer_set_next_event_phys(unsigned long evt,
         return 0;
 }

+static u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)
+{
+        u32 cnt_lo, cnt_hi, tmp_hi;
+
+        do {
+                cnt_hi = readl_relaxed(t->base + offset_lo + 4);
+                cnt_lo = readl_relaxed(t->base + offset_lo);
+                tmp_hi = readl_relaxed(t->base + offset_lo + 4);
+        } while (cnt_hi != tmp_hi);
+
+        return ((u64) cnt_hi << 32) | cnt_lo;
+}
+
+static __always_inline void set_next_event_mem(const int access, unsigned long evt,
+                                           struct clock_event_device *clk)
+{
+        struct arch_timer *timer = to_arch_timer(clk);
+        unsigned long ctrl;
+        u64 cnt;
+
+        ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+        ctrl |= ARCH_TIMER_CTRL_ENABLE;
+        ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+
+        if (access == ARCH_TIMER_MEM_VIRT_ACCESS)
+                cnt = arch_counter_get_cnt_mem(timer, CNTVCT_LO);
+        else
+                cnt = arch_counter_get_cnt_mem(timer, CNTPCT_LO);
+
+        arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
+        arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+}
+
 static int arch_timer_set_next_event_virt_mem(unsigned long evt,
                                               struct clock_event_device *clk)
 {
-        set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
+        set_next_event_mem(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
         return 0;
 }

 static int arch_timer_set_next_event_phys_mem(unsigned long evt,
                                               struct clock_event_device *clk)
 {
-        set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
+        set_next_event_mem(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
         return 0;
 }

+static u64 __arch_timer_check_delta(void)
+{
+#ifdef CONFIG_ARM64
+        const struct midr_range broken_cval_midrs[] = {
+                /*
+                 * XGene-1 implements CVAL in terms of TVAL, meaning
+                 * that the maximum timer range is 32bit. Shame on them.
+                 */
+                MIDR_ALL_VERSIONS(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
+                                                 APM_CPU_PART_POTENZA)),
+                {},
+        };
+
+        if (is_midr_in_range_list(read_cpuid_id(), broken_cval_midrs)) {
+                pr_warn_once("Broken CNTx_CVAL_EL1, limiting width to 32bits");
+                return CLOCKSOURCE_MASK(32);
+        }
+#endif
+        return CLOCKSOURCE_MASK(arch_counter_get_width());
+}
+
 static void __arch_timer_setup(unsigned type,
                                struct clock_event_device *clk)
 {
+        u64 max_delta;
+
         clk->features = CLOCK_EVT_FEAT_ONESHOT;

         if (type == ARCH_TIMER_TYPE_CP15) {
@@ -796,6 +849,7 @@ static void __arch_timer_setup(unsigned type,
                 }

                 clk->set_next_event = sne;
+                max_delta = __arch_timer_check_delta();
         } else {
                 clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
                 clk->name = "arch_mem_timer";
@@ -812,11 +866,13 @@ static void __arch_timer_setup(unsigned type,
                         clk->set_next_event =
                                 arch_timer_set_next_event_phys_mem;
                 }
+
+                max_delta = CLOCKSOURCE_MASK(56);
         }

         clk->set_state_shutdown(clk);

-        clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
+        clockevents_config_and_register(clk, arch_timer_rate, 0xf, max_delta);
 }

 static void arch_timer_evtstrm_enable(int divider)
@@ -986,15 +1042,7 @@ bool arch_timer_evtstrm_available(void)

 static u64 arch_counter_get_cntvct_mem(void)
 {
-        u32 vct_lo, vct_hi, tmp_hi;
-
-        do {
-                vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
-                vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
-                tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
-        } while (vct_hi != tmp_hi);
-
-        return ((u64) vct_hi << 32) | vct_lo;
+        return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO);
 }

 static struct arch_timer_kvm_info arch_timer_kvm_info;
@@ -1007,6 +1055,7 @@ struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
 static void __init arch_counter_register(unsigned type)
 {
         u64 start_count;
+        int width;

         /* Register the CP15 based counter if we have one */
         if (type & ARCH_TIMER_TYPE_CP15) {
@@ -1031,6 +1080,10 @@ static void __init arch_counter_register(unsigned type)
                 arch_timer_read_counter = arch_counter_get_cntvct_mem;
         }

+        width = arch_counter_get_width();
+        clocksource_counter.mask = CLOCKSOURCE_MASK(width);
+        cyclecounter.mask = CLOCKSOURCE_MASK(width);
+
         if (!arch_counter_suspend_stop)
                 clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

         start_count = arch_timer_read_counter();
@@ -1040,8 +1093,7 @@ static void __init arch_counter_register(unsigned type)
         timecounter_init(&arch_timer_kvm_info.timecounter,
                          &cyclecounter, start_count);

-        /* 56 bits minimum, so we assume worst case rollover */
-        sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
+        sched_clock_register(arch_timer_read_counter, width, arch_timer_rate);
 }

 static void arch_timer_stop(struct clock_event_device *clk)
@@ -1182,25 +1234,25 @@ static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
 {
         int ret;
         irq_handler_t func;
-        struct arch_timer *t;

-        t = kzalloc(sizeof(*t), GFP_KERNEL);
-        if (!t)
+        arch_timer_mem = kzalloc(sizeof(*arch_timer_mem), GFP_KERNEL);
+        if (!arch_timer_mem)
                 return -ENOMEM;

-        t->base = base;
-        t->evt.irq = irq;
-        __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);
+        arch_timer_mem->base = base;
+        arch_timer_mem->evt.irq = irq;
+        __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &arch_timer_mem->evt);

         if (arch_timer_mem_use_virtual)
                 func = arch_timer_handler_virt_mem;
         else
                 func = arch_timer_handler_phys_mem;

-        ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
+        ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &arch_timer_mem->evt);
         if (ret) {
                 pr_err("Failed to request mem timer irq\n");
-                kfree(t);
+                kfree(arch_timer_mem);
+                arch_timer_mem = NULL;
         }

         return ret;
@@ -1458,7 +1510,6 @@ arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
                 return ret;
         }

-        arch_counter_base = base;
         arch_timers_present |= ARCH_TIMER_TYPE_MEM;

         return 0;


@@ -24,7 +24,7 @@
 enum arch_timer_reg {
         ARCH_TIMER_REG_CTRL,
-        ARCH_TIMER_REG_TVAL,
+        ARCH_TIMER_REG_CVAL,
 };

 enum arch_timer_ppi_nr {


@@ -17,8 +17,8 @@
 #define ARC_REG_TIMER1_CNT      0x100   /* timer 1 count */

 /* CTRL reg bits */
-#define TIMER_CTRL_IE           (1 << 0) /* Interrupt when Count reaches limit */
-#define TIMER_CTRL_NH           (1 << 1) /* Count only when CPU NOT halted */
+#define ARC_TIMER_CTRL_IE       (1 << 0) /* Interrupt when Count reaches limit */
+#define ARC_TIMER_CTRL_NH       (1 << 1) /* Count only when CPU NOT halted */

 #define ARC_TIMERN_MAX          0xFFFFFFFF