kgdb: fix SMP NMI kgdb_handle_exception exit race
Fix the protection of the kgdb handle_exception exit path, which had an NMI race condition while restoring normal system operation.

There was a small window after the master processor set cpu_in_kgdb to zero but before it set kgdb_active to zero, during which a non-master processor in an SMP system could receive an NMI and re-enter the kgdb_wait() loop.

As long as the master processor sets cpu_in_kgdb before sending the CPU roundup, the cpu_in_kgdb variable can also be used to guard against this race condition.

The kgdb_wait() function no longer needs to check kgdb_active, because that is done in the arch-specific code and handled along with the NMI traps at the low level. This also allows kgdb_wait() to exit correctly if it was entered for some unknown reason, due to a spurious NMI that could not be handled by the arch-specific code.

Signed-off-by: Jason Wessel <jason.wessel@windriver.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 225a4424ad
commit 56fb709329
@@ -561,18 +561,6 @@ static void kgdb_wait(struct pt_regs *regs)
 	smp_wmb();
 	atomic_set(&cpu_in_kgdb[cpu], 1);
 
-	/*
-	 * The primary CPU must be active to enter here, but this is
-	 * guard in case the primary CPU had not been selected if
-	 * this was an entry via nmi.
-	 */
-	while (atomic_read(&kgdb_active) == -1)
-		cpu_relax();
-
-	/* Wait till primary CPU goes completely into the debugger. */
-	while (!atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)]))
-		cpu_relax();
-
 	/* Wait till primary CPU is done with debugging */
 	while (atomic_read(&passive_cpu_wait[cpu]))
 		cpu_relax();
@@ -1447,18 +1435,18 @@ acquirelock:
 		atomic_set(&passive_cpu_wait[i], 1);
 	}
 
-#ifdef CONFIG_SMP
-	/* Signal the other CPUs to enter kgdb_wait() */
-	if ((!kgdb_single_step || !kgdb_contthread) && kgdb_do_roundup)
-		kgdb_roundup_cpus(flags);
-#endif
-
 	/*
 	 * spin_lock code is good enough as a barrier so we don't
 	 * need one here:
 	 */
 	atomic_set(&cpu_in_kgdb[ks->cpu], 1);
 
+#ifdef CONFIG_SMP
+	/* Signal the other CPUs to enter kgdb_wait() */
+	if ((!kgdb_single_step || !kgdb_contthread) && kgdb_do_roundup)
+		kgdb_roundup_cpus(flags);
+#endif
+
 	/*
 	 * Wait for the other CPUs to be notified and be waiting for us:
 	 */
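The hunk above reorders the master's entry path so that cpu_in_kgdb[ks->cpu] is set before the roundup NMIs go out, which means any CPU that enters kgdb_nmicallback() because of the roundup will observe the master's flag already set. Below is a minimal userspace sketch of that ordering, assuming C11 atomics; roundup_other_cpus() is a hypothetical stand-in for kgdb_roundup_cpus(), and none of this is the actual kernel code:

#include <stdatomic.h>

#define NR_CPUS 4                        /* illustrative value */

static atomic_int kgdb_active = -1;      /* cpu id of the master, -1 if none */
static atomic_int cpu_in_kgdb[NR_CPUS];  /* per-cpu "fully inside kgdb" flag */

static void roundup_other_cpus(void)
{
	/* stand-in for kgdb_roundup_cpus(): NMI the other CPUs */
}

static void master_enter(int cpu)
{
	atomic_store(&kgdb_active, cpu);
	atomic_store(&cpu_in_kgdb[cpu], 1);  /* publish before the roundup */
	roundup_other_cpus();
}

static void master_exit(int cpu)
{
	atomic_store(&cpu_in_kgdb[cpu], 0);  /* retract the guard first ... */
	atomic_store(&kgdb_active, -1);      /* ... then release mastership */
}

int main(void)
{
	master_enter(0);
	master_exit(0);
	return 0;
}

Because cpu_in_kgdb[master] is cleared before kgdb_active on the way out, the check added in the next hunk can never be satisfied inside the exit window.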
@@ -1514,7 +1502,8 @@ int kgdb_nmicallback(int cpu, void *regs)
 {
 #ifdef CONFIG_SMP
 	if (!atomic_read(&cpu_in_kgdb[cpu]) &&
-			atomic_read(&kgdb_active) != cpu) {
+			atomic_read(&kgdb_active) != cpu &&
+			atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)])) {
 		kgdb_wait((struct pt_regs *)regs);
 		return 0;
 	}
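The effect of the new kgdb_nmicallback() condition can be demonstrated in the same hypothetical model. old_guard() and new_guard() below are illustrative reconstructions of the predicate before and after this patch, not kernel code; the extra master != -1 test only keeps the sketch's array index in bounds:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static atomic_int kgdb_active = -1;      /* cpu id of the master, -1 if none */
static atomic_int cpu_in_kgdb[NR_CPUS];  /* per-cpu "fully inside kgdb" flag */

/* Old guard: enter kgdb_wait() whenever this CPU is not the master. */
static bool old_guard(int cpu)
{
	return !atomic_load(&cpu_in_kgdb[cpu]) &&
	       atomic_load(&kgdb_active) != cpu;
}

/* New guard: additionally require the master to be fully inside kgdb. */
static bool new_guard(int cpu)
{
	int master = atomic_load(&kgdb_active);

	return !atomic_load(&cpu_in_kgdb[cpu]) &&
	       master != cpu &&
	       master != -1 &&               /* bounds check for the sketch */
	       atomic_load(&cpu_in_kgdb[master]);
}

int main(void)
{
	/* Master CPU 0 enters the debugger and rounds up the others. */
	atomic_store(&kgdb_active, 0);
	atomic_store(&cpu_in_kgdb[0], 1);
	printf("roundup NMI on cpu1: old=%d new=%d\n", old_guard(1), new_guard(1));

	/* Exit path: cpu_in_kgdb[0] is cleared first, and this is the
	 * window before kgdb_active is cleared as well. */
	atomic_store(&cpu_in_kgdb[0], 0);
	printf("late NMI on cpu1:    old=%d new=%d\n", old_guard(1), new_guard(1));

	atomic_store(&kgdb_active, -1);
	return 0;
}

During the roundup both guards admit the passive CPU (old=1 new=1); in the exit window only the old guard would still let a late NMI re-enter kgdb_wait() (old=1 new=0), which is exactly the race this commit closes.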