2006-12-08 01:51:35 +08:00
|
|
|
/*
|
|
|
|
* arch/ia64/kernel/crash.c
|
|
|
|
*
|
|
|
|
* Architecture specific (ia64) functions for kexec based crash dumps.
|
|
|
|
*
|
|
|
|
* Created by: Khalid Aziz <khalid.aziz@hp.com>
|
|
|
|
* Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
|
|
|
|
* Copyright (C) 2005 Intel Corp Zou Nan hai <nanhai.zou@intel.com>
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
#include <linux/smp.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/crash_dump.h>
|
|
|
|
#include <linux/bootmem.h>
|
|
|
|
#include <linux/kexec.h>
|
|
|
|
#include <linux/elfcore.h>
|
|
|
|
#include <linux/sysctl.h>
|
|
|
|
#include <linux/init.h>
|
2007-05-08 15:27:03 +08:00
|
|
|
#include <linux/kdebug.h>
|
2006-12-08 01:51:35 +08:00
|
|
|
|
|
|
|
#include <asm/mca.h>
|
|
|
|
|
|
|
|
/* Per-cpu flag, set to 1 once the cpu has entered kdump_cpu_freeze(). */
int kdump_status[NR_CPUS];
/* Count of cpus that have frozen; compared against num_online_cpus()-1. */
static atomic_t kdump_cpu_frozen;
/*
 * Non-zero once a kdump has been initiated.  Incremented (not just set)
 * so that racing initiators (panic path vs. INIT monarch) can detect
 * that someone else already owns the kdump and freeze themselves instead.
 */
atomic_t kdump_in_progress;
/*
 * Set when an INIT monarch loses the race on kdump_in_progress: it must
 * freeze in DIE_INIT_MONARCH_LEAVE rather than start a second kdump.
 */
static int kdump_freeze_monarch;
/* sysctl kernel.kdump_on_init: take a crash dump when INIT is asserted. */
static int kdump_on_init = 1;
/* sysctl kernel.kdump_on_fatal_mca: take a crash dump on unrecoverable MCA. */
static int kdump_on_fatal_mca = 1;
|
2006-12-08 01:51:35 +08:00
|
|
|
|
|
|
|
static inline Elf64_Word
|
|
|
|
*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
|
|
|
|
size_t data_len)
|
|
|
|
{
|
|
|
|
struct elf_note *note = (struct elf_note *)buf;
|
|
|
|
note->n_namesz = strlen(name) + 1;
|
|
|
|
note->n_descsz = data_len;
|
|
|
|
note->n_type = type;
|
|
|
|
buf += (sizeof(*note) + 3)/4;
|
|
|
|
memcpy(buf, name, note->n_namesz);
|
|
|
|
buf += (note->n_namesz + 3)/4;
|
|
|
|
memcpy(buf, data, data_len);
|
|
|
|
buf += (data_len + 3)/4;
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Terminate the note list: a zeroed elf_note header (n_namesz == 0,
 * n_descsz == 0, n_type == 0) marks the end of the crash notes.
 */
static void
final_note(void *buf)
{
	memset(buf, 0, sizeof(struct elf_note));
}
|
|
|
|
|
|
|
|
/* Assembly helper that dumps this cpu's registers into an elf_gregset. */
extern void ia64_dump_cpu_regs(void *);

/* Per-cpu scratch prstatus filled by crash_save_this_cpu(). */
static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus);
|
|
|
|
|
|
|
|
/*
 * Save this cpu's register state as an NT_PRSTATUS ELF note in the
 * per-cpu crash_notes buffer, for the kdump kernel / crash tools to read.
 * Called with interrupts disabled on the crash/freeze path.
 */
void
crash_save_this_cpu(void)
{
	void *buf;
	unsigned long cfm, sof, sol;

	int cpu = smp_processor_id();
	struct elf_prstatus *prstatus = &per_cpu(elf_prstatus, cpu);

	elf_greg_t *dst = (elf_greg_t *)&(prstatus->pr_reg);
	memset(prstatus, 0, sizeof(*prstatus));
	prstatus->pr_pid = current->pid;

	ia64_dump_cpu_regs(dst);
	/*
	 * dst[43] holds the CFM and dst[46] a backing-store pointer, per
	 * the register layout produced by ia64_dump_cpu_regs()
	 * (NOTE(review): indices assumed from ia64 elf_gregset layout —
	 * confirm against ia64_dump_cpu_regs).
	 */
	cfm = dst[43];
	sol = (cfm >> 7) & 0x7f;	/* size of locals */
	sof = cfm & 0x7f;		/* size of frame */
	/* rewind bsp past the current frame's output registers */
	dst[46] = (unsigned long)ia64_rse_skip_regs((unsigned long *)dst[46],
			sof - sol);

	buf = (u64 *) per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;		/* crash_notes not allocated; nothing to save */
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, prstatus,
			sizeof(*prstatus));
	final_note(buf);
}
|
|
|
|
|
2007-02-05 14:43:42 +08:00
|
|
|
#ifdef CONFIG_SMP
/*
 * Poll for up to ~1 second (1000 x 1ms) until every other online cpu
 * has entered kdump_cpu_freeze().  Returns 0 if all cpus froze in
 * time, 1 on timeout.
 */
static int
kdump_wait_cpu_freeze(void)
{
	int nr_other_cpus = num_online_cpus() - 1;
	int i;

	for (i = 0; i < 1000; i++) {
		if (atomic_read(&kdump_cpu_frozen) == nr_other_cpus)
			return 0;
		udelay(1000);
	}
	return 1;
}
#endif
|
2006-12-08 01:51:35 +08:00
|
|
|
|
|
|
|
/*
 * Quiesce the machine in preparation for machine_kexec() into the
 * kdump kernel.  Runs on the crashing cpu with the system in an
 * arbitrary (panicked) state.
 */
void
machine_crash_shutdown(struct pt_regs *pt)
{
	/* This function is only called after the system
	 * has paniced or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	kexec_disable_iosapic();
#ifdef CONFIG_SMP
	/*
	 * If kdump_on_init is set and an INIT is asserted here, kdump will
	 * be started again via INIT monarch.
	 */
	local_irq_disable();
	ia64_set_psr_mc();	/* mask MCA/INIT */
	/*
	 * atomic_inc_return() != 1 means some other initiator (e.g. an
	 * INIT monarch) already owns the kdump; this cpu just freezes.
	 */
	if (atomic_inc_return(&kdump_in_progress) != 1)
		unw_init_running(kdump_cpu_freeze, NULL);

	/*
	 * Now this cpu is ready for kdump.
	 * Stop all others by IPI or INIT. They could receive INIT from
	 * outside and might be INIT monarch, but only thing they have to
	 * do is falling into kdump_cpu_freeze().
	 *
	 * If an INIT is asserted here:
	 * - All receivers might be slaves, since some of cpus could already
	 *   be frozen and INIT might be masked on monarch. In this case,
	 *   all slaves will be frozen soon since kdump_in_progress will let
	 *   them into DIE_INIT_SLAVE_LEAVE.
	 * - One might be a monarch, but INIT rendezvous will fail since
	 *   at least this cpu already have INIT masked so it never join
	 *   to the rendezvous. In this case, all slaves and monarch will
	 *   be frozen soon with no wait since the INIT rendezvous is skipped
	 *   by kdump_in_progress.
	 */
	kdump_smp_send_stop();
	/* not all cpu response to IPI, send INIT to freeze them */
	if (kdump_wait_cpu_freeze()) {
		kdump_smp_send_init();
		/* wait again, don't go ahead if possible */
		kdump_wait_cpu_freeze();
	}
#endif
}
|
|
|
|
|
|
|
|
/*
 * Final leg of the kdump path: record vmcoreinfo, silence interrupts
 * and the iosapic, then jump into the pre-loaded crash kernel.
 * Does not return.
 */
static void
machine_kdump_on_init(void)
{
	crash_save_vmcoreinfo();
	local_irq_disable();
	kexec_disable_iosapic();
	machine_kexec(ia64_kimage);
}
|
|
|
|
|
|
|
|
/*
 * Park a non-initiating cpu for kdump: save its register state, mask
 * MCA/INIT so a later INIT cannot thaw it back into the dead kernel,
 * signal the initiator via kdump_cpu_frozen/kdump_status, and spin
 * forever.  Invoked through unw_init_running() so @info describes a
 * valid unwind frame.  Never returns.
 */
void
kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
{
	int cpuid;

	local_irq_disable();
	cpuid = smp_processor_id();
	crash_save_this_cpu();
	/* record a resumable kernel stack pointer for the crash tools */
	current->thread.ksp = (__u64)info->sw - 16;

	/*
	 * Masking MCA/INIT here is a no-op when called from an INIT
	 * handler (psr.mc already set before OS_INIT entry), but it is
	 * essential on the panic/IPI path so an external INIT cannot
	 * re-enter this frozen cpu.
	 */
	ia64_set_psr_mc();	/* mask MCA/INIT and stop reentrance */

	atomic_inc(&kdump_cpu_frozen);
	kdump_status[cpuid] = 1;
	mb();	/* make the flags visible before spinning */
	for (;;)
		cpu_relax();
}
|
|
|
|
|
|
|
|
/*
 * Die-chain notifier driving kdump from MCA/INIT events.
 *
 * Runs at priority 1 (before default_monarch_init_process).  Two jobs:
 *  - if a kdump is already in progress, divert this cpu into
 *    kdump_cpu_freeze() at the appropriate MCA/INIT exit points;
 *  - otherwise, on a fatal MCA or an INIT (when the respective sysctl
 *    is enabled), claim kdump_in_progress and start the dump.
 * Always returns NOTIFY_DONE so the rest of the chain still runs.
 */
static int
kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
{
	struct ia64_mca_notify_die *nd;
	struct die_args *args = data;

	/* Someone already initiated kdump: freeze instead of competing. */
	if (atomic_read(&kdump_in_progress)) {
		switch (val) {
		case DIE_INIT_MONARCH_LEAVE:
			/* the winning monarch continues to machine_kexec */
			if (!kdump_freeze_monarch)
				break;
			/* fall through */
		case DIE_INIT_SLAVE_LEAVE:
		case DIE_INIT_MONARCH_ENTER:
		case DIE_MCA_RENDZVOUS_LEAVE:
			unw_init_running(kdump_cpu_freeze, NULL);
			break;
		}
	}

	if (!kdump_on_init && !kdump_on_fatal_mca)
		return NOTIFY_DONE;

	if (!ia64_kimage) {
		/* warn only once per INIT, on the monarch's exit */
		if (val == DIE_INIT_MONARCH_LEAVE)
			ia64_mca_printk(KERN_NOTICE
					"%s: kdump not configured\n",
					__func__);
		return NOTIFY_DONE;
	}

	if (val != DIE_INIT_MONARCH_LEAVE &&
	    val != DIE_INIT_MONARCH_PROCESS &&
	    val != DIE_MCA_MONARCH_LEAVE)
		return NOTIFY_DONE;

	nd = (struct ia64_mca_notify_die *)args->err;

	switch (val) {
	case DIE_INIT_MONARCH_PROCESS:
		/* Reason code 1 means machine check rendezvous*/
		if (kdump_on_init && (nd->sos->rv_rc != 1)) {
			/*
			 * Lost the race for kdump_in_progress: a panic-path
			 * initiator got there first, so this monarch must
			 * freeze at MONARCH_LEAVE instead of dumping.
			 */
			if (atomic_inc_return(&kdump_in_progress) != 1)
				kdump_freeze_monarch = 1;
		}
		break;
	case DIE_INIT_MONARCH_LEAVE:
		/* Reason code 1 means machine check rendezvous*/
		if (kdump_on_init && (nd->sos->rv_rc != 1))
			machine_kdump_on_init();
		break;
	case DIE_MCA_MONARCH_LEAVE:
		/* *(nd->data) indicate if MCA is recoverable */
		if (kdump_on_fatal_mca && !(*(nd->data))) {
			if (atomic_inc_return(&kdump_in_progress) == 1)
				machine_kdump_on_init();
			/* We got fatal MCA while kdump!? No way!! */
		}
		break;
	}
	return NOTIFY_DONE;
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_SYSCTL
/* /proc/sys/kernel knobs controlling the kdump triggers above. */
static struct ctl_table kdump_ctl_table[] = {
	{
		.procname = "kdump_on_init",
		.data = &kdump_on_init,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "kdump_on_fatal_mca",
		.data = &kdump_on_fatal_mca,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{ }	/* sentinel */
};
|
|
|
|
|
2014-06-07 05:37:55 +08:00
|
|
|
/* Root table anchoring the knobs under /proc/sys/kernel. */
static struct ctl_table sys_table[] = {
	{
		.procname = "kernel",
		.mode = 0555,
		.child = kdump_ctl_table,
	},
	{ }	/* sentinel */
};
#endif
|
|
|
|
|
|
|
|
static int
|
|
|
|
machine_crash_setup(void)
|
|
|
|
{
|
2007-04-04 08:53:42 +08:00
|
|
|
/* be notified before default_monarch_init_process */
|
2006-12-08 01:51:35 +08:00
|
|
|
static struct notifier_block kdump_init_notifier_nb = {
|
|
|
|
.notifier_call = kdump_init_notifier,
|
2007-04-04 08:53:42 +08:00
|
|
|
.priority = 1,
|
2006-12-08 01:51:35 +08:00
|
|
|
};
|
|
|
|
int ret;
|
|
|
|
if((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
|
|
|
|
return ret;
|
|
|
|
#ifdef CONFIG_SYSCTL
|
2007-02-14 16:34:09 +08:00
|
|
|
register_sysctl_table(sys_table);
|
2006-12-08 01:51:35 +08:00
|
|
|
#endif
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
__initcall(machine_crash_setup);
|
|
|
|
|