OpenCloudOS-Kernel/arch/arm/kernel/smp_twd.c

/*
 * linux/arch/arm/kernel/smp_twd.c
 *
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <asm/smp_twd.h>
#include <asm/localtimer.h>
#include <asm/hardware/gic.h>
/* set up by the platform code */
static void __iomem *twd_base;
static struct clk *twd_clk;
static unsigned long twd_timer_rate;
static bool common_setup_called;
static DEFINE_PER_CPU(bool, percpu_setup_called);
static struct clock_event_device __percpu **twd_evt;
static int twd_ppi;
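
/*
 * Program the TWD for the requested clockevent mode.  In periodic mode the
 * load register is set to (rate / HZ, rounded) so the counter underflows
 * once per jiffy; in oneshot mode only the interrupt and oneshot bits are
 * set here, and the actual period is programmed later by
 * twd_set_next_event().  Shutdown/unused clears the control register and
 * stops the timer.
 */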
static void twd_set_mode(enum clock_event_mode mode,
			struct clock_event_device *clk)
{
	unsigned long ctrl;

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
			| TWD_TIMER_CONTROL_PERIODIC;
		__raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
			twd_base + TWD_TIMER_LOAD);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		/* period set, and timer enabled in 'next_event' hook */
		ctrl = TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_ONESHOT;
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		ctrl = 0;
	}

	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
}
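
/*
 * Program a oneshot expiry: write the number of timer ticks until the event
 * into the counter and make sure the enable bit is set.  The TWD counts the
 * value down and raises its per-cpu interrupt when it reaches zero.
 */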
static int twd_set_next_event(unsigned long evt,
			struct clock_event_device *unused)
{
	unsigned long ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL);

	ctrl |= TWD_TIMER_CONTROL_ENABLE;

	__raw_writel(evt, twd_base + TWD_TIMER_COUNTER);
	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);

	return 0;
}

/*
 * twd_timer_ack: checks for a local timer interrupt.
 *
 * If a local timer interrupt has occurred, acknowledge and return 1.
 * Otherwise, return 0.
 */
static int twd_timer_ack(void)
{
	if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) {
		__raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
		return 1;
	}

	return 0;
}
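
/*
 * Called through twd_lt_ops.stop (typically when a CPU is taken offline):
 * shut the timer down and mask its per-cpu interrupt.
 */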
static void twd_timer_stop(struct clock_event_device *clk)
{
	twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
	disable_percpu_irq(clk->irq);
}

#ifdef CONFIG_COMMON_CLK

/*
 * Updates clockevent frequency when the cpu frequency changes.
 * Called on the cpu that is changing frequency with interrupts disabled.
 */
static void twd_update_frequency(void *new_rate)
{
	twd_timer_rate = *((unsigned long *) new_rate);

	clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate);
}
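
/*
 * Common clk rate-change notifier.  The notifier data already carries the
 * new rate, so twd_update_frequency() can run from IPI context without
 * calling clk_get_rate(), which may sleep.
 */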
static int twd_rate_change(struct notifier_block *nb,
	unsigned long flags, void *data)
{
	struct clk_notifier_data *cnd = data;

	/*
	 * The twd clock events must be reprogrammed to account for the new
	 * frequency.  The timer is local to a cpu, so cross-call to the
	 * changing cpu.
	 */
	if (flags == POST_RATE_CHANGE)
		smp_call_function(twd_update_frequency,
				  (void *)&cnd->new_rate, 1);

	return NOTIFY_OK;
}

static struct notifier_block twd_clk_nb = {
	.notifier_call = twd_rate_change,
};
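
/*
 * Register the clk notifier once at core_initcall time, but only if a TWD
 * clockevent was actually set up on this CPU and a usable clock reference
 * exists; otherwise there is nothing to reprogram on rate changes.
 */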
static int twd_clk_init(void)
{
	if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
		return clk_notifier_register(twd_clk, &twd_clk_nb);

	return 0;
}
core_initcall(twd_clk_init);
#elif defined (CONFIG_CPU_FREQ)
#include <linux/cpufreq.h>

/*
 * Updates clockevent frequency when the cpu frequency changes.
 * Called on the cpu that is changing frequency with interrupts disabled.
 */
static void twd_update_frequency(void *data)
{
	twd_timer_rate = clk_get_rate(twd_clk);

	clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate);
}

static int twd_cpufreq_transition(struct notifier_block *nb,
	unsigned long state, void *data)
{
	struct cpufreq_freqs *freqs = data;

	/*
	 * The twd clock events must be reprogrammed to account for the new
	 * frequency.  The timer is local to a cpu, so cross-call to the
	 * changing cpu.
	 */
	if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
		smp_call_function_single(freqs->cpu, twd_update_frequency,
			NULL, 1);

	return NOTIFY_OK;
}

static struct notifier_block twd_cpufreq_nb = {
	.notifier_call = twd_cpufreq_transition,
};
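
/*
 * Legacy path for platforms still using CPUfreq notifiers rather than the
 * common clk framework.  Only register if the TWD local timers were really
 * initialised (twd_evt populated); on SoCs without a (working) TWD a
 * frequency transition would otherwise dereference an unset per-cpu pointer.
 */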
static int twd_cpufreq_init(void)
{
	if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
		return cpufreq_register_notifier(&twd_cpufreq_nb,
			CPUFREQ_TRANSITION_NOTIFIER);

	return 0;
}
core_initcall(twd_cpufreq_init);
#endif
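
/*
 * Measure the TWD input clock when no usable clk is available: wait for a
 * jiffy boundary, let the counter free-run down from 0xFFFFFFFF for five
 * jiffies, then scale the observed number of decrements by HZ / 5 to get
 * ticks per second.
 */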
static void __cpuinit twd_calibrate_rate(void)
{
	unsigned long count;
	u64 waitjiffies;

	/*
	 * If this is the first time round, we need to work out how fast
	 * the timer ticks
	 */
	if (twd_timer_rate == 0) {
		printk(KERN_INFO "Calibrating local timer... ");

		/* Wait for a tick to start */
		waitjiffies = get_jiffies_64() + 1;

		while (get_jiffies_64() < waitjiffies)
			udelay(10);

		/* OK, now the tick has started, let's get the timer going */
		waitjiffies += 5;

		/* enable, no interrupt or reload */
		__raw_writel(0x1, twd_base + TWD_TIMER_CONTROL);

		/* maximum value */
		__raw_writel(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);

		while (get_jiffies_64() < waitjiffies)
			udelay(10);

		count = __raw_readl(twd_base + TWD_TIMER_COUNTER);

		twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);

		printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
			(twd_timer_rate / 10000) % 100);
	}
}
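
/*
 * Per-cpu PPI handler.  dev_id is this CPU's slot in the twd_evt per-cpu
 * area (as passed to request_percpu_irq()), so dereference it to find the
 * clockevent device to feed.
 */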
static irqreturn_t twd_handler(int irq, void *dev_id)
{
	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;

	if (twd_timer_ack()) {
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
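
/*
 * Look up and enable the "smp_twd" clock via clkdev.  An ERR_PTR return
 * tells the caller to fall back to runtime calibration of the timer rate.
 */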
static struct clk *twd_get_clock(void)
{
	struct clk *clk;
	int err;

	clk = clk_get_sys("smp_twd", NULL);
	if (IS_ERR(clk)) {
		pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk));
		return clk;
	}

	err = clk_prepare_enable(clk);
	if (err) {
		pr_err("smp_twd: clock failed to prepare+enable: %d\n", err);
		clk_put(clk);
		return ERR_PTR(err);
	}

	return clk;
}

/*
 * Set up the local clock events for a CPU.
 */
static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
{
	struct clock_event_device **this_cpu_clk;
	int cpu = smp_processor_id();

	/*
	 * If the basic setup for this CPU has been done before don't
	 * bother with the below.
	 */
	if (per_cpu(percpu_setup_called, cpu)) {
		__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
		clockevents_register_device(*__this_cpu_ptr(twd_evt));
		enable_percpu_irq(clk->irq, 0);
		return 0;
	}
	per_cpu(percpu_setup_called, cpu) = true;

	/*
	 * This stuff only needs to be done once for the entire TWD cluster
	 * during the runtime of the system.
	 */
	if (!common_setup_called) {
		twd_clk = twd_get_clock();

		/*
		 * We use IS_ERR_OR_NULL() here, because if the clock stubs
		 * are active we will get a valid clk reference which is
		 * however NULL and will return the rate 0. In that case we
		 * need to calibrate the rate instead.
		 */
		if (!IS_ERR_OR_NULL(twd_clk))
			twd_timer_rate = clk_get_rate(twd_clk);
		else
			twd_calibrate_rate();

		common_setup_called = true;
	}

	/*
	 * The following is done once per CPU the first time .setup() is
	 * called.
	 */
	__raw_writel(0, twd_base + TWD_TIMER_CONTROL);

	clk->name = "local_timer";
	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
			CLOCK_EVT_FEAT_C3STOP;
	clk->rating = 350;
	clk->set_mode = twd_set_mode;
	clk->set_next_event = twd_set_next_event;
	clk->irq = twd_ppi;

	this_cpu_clk = __this_cpu_ptr(twd_evt);
	*this_cpu_clk = clk;

	clockevents_config_and_register(clk, twd_timer_rate,
					0xf, 0xffffffff);
	enable_percpu_irq(clk->irq, 0);

	return 0;
}

static struct local_timer_ops twd_lt_ops __cpuinitdata = {
	.setup	= twd_timer_setup,
	.stop	= twd_timer_stop,
};
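
/*
 * Registration path shared by the static-resource and DT entry points:
 * allocate the per-cpu clockevent pointers, request the shared PPI and hook
 * up the local timer ops.  On failure everything, including the already
 * mapped twd_base, is torn down again.
 */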
static int __init twd_local_timer_common_register(void)
{
	int err;

	twd_evt = alloc_percpu(struct clock_event_device *);
	if (!twd_evt) {
		err = -ENOMEM;
		goto out_free;
	}

	err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt);
	if (err) {
		pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err);
		goto out_free;
	}

	err = local_timer_register(&twd_lt_ops);
	if (err)
		goto out_irq;

	return 0;

out_irq:
	free_percpu_irq(twd_ppi, twd_evt);
out_free:
	iounmap(twd_base);
	twd_base = NULL;
	free_percpu(twd_evt);

	return err;
}
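
/*
 * Non-DT entry point: the platform hands over a struct twd_local_timer whose
 * first resource is presumed to be the TWD register window and whose second
 * is the PPI number.
 */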
int __init twd_local_timer_register(struct twd_local_timer *tlt)
{
	if (twd_base || twd_evt)
		return -EBUSY;

	twd_ppi	= tlt->res[1].start;

	twd_base = ioremap(tlt->res[0].start, resource_size(&tlt->res[0]));
	if (!twd_base)
		return -ENOMEM;

	return twd_local_timer_common_register();
}

#ifdef CONFIG_OF
static const struct of_device_id twd_of_match[] __initconst = {
	{ .compatible = "arm,cortex-a9-twd-timer", },
	{ .compatible = "arm,cortex-a5-twd-timer", },
	{ .compatible = "arm,arm11mp-twd-timer", },
	{ },
};
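
/*
 * DT entry point: locate a matching TWD node, map its PPI and registers and
 * run the common registration.  Failures are reported with a WARN rather
 * than returned, since the function returns void.
 */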
void __init twd_local_timer_of_register(void)
{
	struct device_node *np;
	int err;

	np = of_find_matching_node(NULL, twd_of_match);
	if (!np)
		return;

	twd_ppi = irq_of_parse_and_map(np, 0);
	if (!twd_ppi) {
		err = -EINVAL;
		goto out;
	}

	twd_base = of_iomap(np, 0);
	if (!twd_base) {
		err = -ENOMEM;
		goto out;
	}

	err = twd_local_timer_common_register();

out:
	WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
}
#endif