/*
 * Detect Soft Lockups
 *
 * started by Ingo Molnar, Copyright (C) 2005, 2006 Red Hat, Inc.
 *
 * this code detects soft lockups: incidents where, on a CPU,
 * the kernel does not reschedule for softlockup_thresh seconds
 * (60 by default) or more.
 */
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/notifier.h>
#include <linux/module.h>

#include <asm/irq_regs.h>

static DEFINE_SPINLOCK(print_lock);

static DEFINE_PER_CPU(unsigned long, touch_timestamp);
static DEFINE_PER_CPU(unsigned long, print_timestamp);
static DEFINE_PER_CPU(struct task_struct *, watchdog_task);

static int __read_mostly did_panic;
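/* seconds without a watchdog touch before softlockup_tick() reports a lockup: */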
unsigned long __read_mostly softlockup_thresh = 60;
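
/*
 * Panic notifier: latch did_panic so the watchdog code goes quiet
 * once the system is going down anyway:
 */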
static int
softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
	did_panic = 1;

	return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
	.notifier_call = softlock_panic,
};

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}
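
/*
 * Code that legitimately keeps a CPU busy for a long time calls this
 * to push the per-CPU timestamp forward and keep the detector quiet:
 */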
void touch_softlockup_watchdog(void)
{
	int this_cpu = raw_smp_processor_id();

	__raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/* Cause each CPU to re-update its timestamp rather than complain */
	for_each_online_cpu(cpu)
		per_cpu(touch_timestamp, cpu) = 0;
}
EXPORT_SYMBOL(touch_all_softlockup_watchdogs);

/*
 * This callback runs from the timer interrupt, and checks
 * whether the watchdog thread has hung or not:
 */
void softlockup_tick(void)
{
	int this_cpu = smp_processor_id();
	unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
	unsigned long print_timestamp;
	struct pt_regs *regs = get_irq_regs();
	unsigned long now;

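	/* A zero timestamp means: (re-)initialize, do not report yet */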
	if (touch_timestamp == 0) {
		touch_softlockup_watchdog();
		return;
	}

	print_timestamp = per_cpu(print_timestamp, this_cpu);

	/* report at most once a second */
	if ((print_timestamp >= touch_timestamp &&
			print_timestamp < (touch_timestamp + 1)) ||
			did_panic || !per_cpu(watchdog_task, this_cpu)) {
		return;
	}

	/* do not print during early bootup: */
	if (unlikely(system_state != SYSTEM_RUNNING)) {
		touch_softlockup_watchdog();
		return;
	}

	now = get_timestamp(this_cpu);

	/* Warn about unreasonable delays: */
	if (now <= (touch_timestamp + softlockup_thresh))
		return;

	per_cpu(print_timestamp, this_cpu) = touch_timestamp;

	spin_lock(&print_lock);
	printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
			this_cpu, now - touch_timestamp,
			current->comm, task_pid_nr(current));
	if (regs)
		show_regs(regs);
	else
		dump_stack();
	spin_unlock(&print_lock);
}

/*
 * Have a reasonable limit on the number of tasks checked:
 */
unsigned long __read_mostly sysctl_hung_task_check_count = 1024;

/*
 * Zero means infinite timeout - no checking done:
 */
unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120;
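
/*
 * Limit the number of "blocked task" warnings we print:
 */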
unsigned long __read_mostly sysctl_hung_task_warnings = 10;

/*
 * Only do the hung-tasks check on one CPU:
 */
static int check_cpu __read_mostly = -1;
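
/*
 * A task counts as hung if its context-switch count (voluntary +
 * involuntary) has not moved since we last looked and the configured
 * timeout has passed; frozen tasks are expected to be inactive and
 * are skipped:
 */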
static void check_hung_task(struct task_struct *t, unsigned long now)
{
	unsigned long switch_count = t->nvcsw + t->nivcsw;

	if (t->flags & PF_FROZEN)
		return;

	if (switch_count != t->last_switch_count || !t->last_switch_timestamp) {
		t->last_switch_count = switch_count;
		t->last_switch_timestamp = now;
		return;
	}
	if ((long)(now - t->last_switch_timestamp) <
					sysctl_hung_task_timeout_secs)
		return;
	if (sysctl_hung_task_warnings < 0)
		return;
	sysctl_hung_task_warnings--;

	/*
	 * Ok, the task did not get scheduled for more than
	 * sysctl_hung_task_timeout_secs (2 minutes by default),
	 * complain:
	 */
	printk(KERN_ERR "INFO: task %s:%d blocked for more than "
			"%ld seconds.\n", t->comm, t->pid,
			sysctl_hung_task_timeout_secs);
	printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
			" disables this message.\n");
	sched_show_task(t);
	__debug_show_held_locks(t);

	t->last_switch_timestamp = now;
	touch_nmi_watchdog();
}

/*
 * Check whether any TASK_UNINTERRUPTIBLE task has failed to be woken
 * up for a really long time (sysctl_hung_task_timeout_secs, 120
 * seconds by default). If that happens, print out a warning.
 */
static void check_hung_uninterruptible_tasks(int this_cpu)
{
	int max_count = sysctl_hung_task_check_count;
	unsigned long now = get_timestamp(this_cpu);
	struct task_struct *g, *t;

	/*
	 * If the system crashed already then all bets are off,
	 * do not report extra hung tasks:
	 */
	if ((tainted & TAINT_DIE) || did_panic)
		return;

	read_lock(&tasklist_lock);
	do_each_thread(g, t) {
		if (!--max_count)
			break;
		if (t->state & TASK_UNINTERRUPTIBLE)
			check_hung_task(t, now);
	} while_each_thread(g, t);

	read_unlock(&tasklist_lock);
}

/*
 * The watchdog thread - wakes up every 10 seconds to touch the
 * timestamp and to run the hung-task check on one CPU.
 */
static int watchdog(void *__bind_cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	int this_cpu = (long)__bind_cpu;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* initialize timestamp */
	touch_softlockup_watchdog();

	/*
	 * Run briefly once every 10 seconds to reset the softlockup
	 * timestamp. If this gets delayed for more than 60 seconds then
	 * the debug-printout triggers in softlockup_tick().
	 */
	while (!kthread_should_stop()) {
		touch_softlockup_watchdog();
		msleep_interruptible(10000);
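
		/* Only one CPU (check_cpu) scans the task list for hung tasks: */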
		if (this_cpu != check_cpu)
			continue;

		if (sysctl_hung_task_timeout_secs)
			check_hung_uninterruptible_tasks(this_cpu);
	}

	return 0;
}

/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		BUG_ON(per_cpu(watchdog_task, hotcpu));
		p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
		if (IS_ERR(p)) {
			printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		per_cpu(touch_timestamp, hotcpu) = 0;
		per_cpu(watchdog_task, hotcpu) = p;
		kthread_bind(p, hotcpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		check_cpu = any_online_cpu(cpu_online_map);
		wake_up_process(per_cpu(watchdog_task, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(watchdog_task, hotcpu))
			break;
		/* Unbind so it can run. Fall thru. */
		kthread_bind(per_cpu(watchdog_task, hotcpu),
			     any_online_cpu(cpu_online_map));
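	/*
	 * If the CPU running the hung-task check goes away, hand the
	 * job to a CPU that stays online:
	 */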
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		if (hotcpu == check_cpu) {
			cpumask_t temp_cpu_online_map = cpu_online_map;

			cpu_clear(hotcpu, temp_cpu_online_map);
			check_cpu = any_online_cpu(temp_cpu_online_map);
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		p = per_cpu(watchdog_task, hotcpu);
		per_cpu(watchdog_task, hotcpu) = NULL;
		kthread_stop(p);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};
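
/*
 * Called once during boot: create the watchdog thread for the boot
 * CPU, register the CPU-hotplug notifier so later CPUs get threads
 * too, and hook into the panic notifier chain:
 */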
__init void spawn_softlockup_task(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
}