[PATCH] sched: disable preempt in idle tasks
Run idle threads with preempt disabled.

Also corrected a bug in arm26's cpu_idle (make it actually call schedule()).
How did it ever work before?

Might fix the CPU hotplugging hang which Nigel Cunningham noted.

We think the bug hits if the idle thread is preempted after checking
need_resched() and before going to sleep, and the CPU is then offlined.

After calling stop_machine_run, the CPU eventually returns from preemption
into the idle thread and goes to sleep.  It keeps executing the previous
idle loop and never gets a chance to call play_dead.

By disabling preemption until we are ready to explicitly schedule, this bug
is fixed and the idle threads generally become more robust.

From: alexs <ashepard@u.washington.edu>

  PPC build fix

From: Yoichi Yuasa <yuasa@hh.iij4u.or.jp>

  MIPS build fix

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 5bfb5d690f
parent ede3d0fba9
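For orientation before the per-architecture hunks: the sketch below shows the common idle-loop shape every cpu_idle() is converted to. It is an illustration of the pattern, not a hunk from the patch; arch_idle() is a hypothetical stand-in for whichever low-power routine the architecture actually calls (default_idle(), a pm_idle hook, platform_idle(), and so on).

/*
 * Illustrative only -- the generic shape the patch applies.  The idle
 * thread now runs with preemption disabled the whole time and only
 * opens a scheduling window explicitly, so it can no longer be
 * preempted between the need_resched() check and going to sleep.
 */
#include <linux/preempt.h>
#include <linux/sched.h>

void cpu_idle(void)
{
        /* entered with preemption already disabled (see rest_init()
         * and the secondary-CPU startup hunks below) */
        while (1) {
                while (!need_resched())
                        arch_idle();    /* hypothetical stand-in */

                /*
                 * Drop the preempt count without rescheduling here,
                 * let schedule() switch away, then disable preemption
                 * again before resuming the need_resched() poll.
                 */
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}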
@@ -116,13 +116,13 @@ void cpu_idle(void)
 
 		if (!idle)
 			idle = default_idle;
-		preempt_disable();
 		leds_event(led_idle_start);
 		while (!need_resched())
 			idle();
 		leds_event(led_idle_end);
-		preempt_enable();
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 	}
 }
@@ -256,7 +256,9 @@ void __cpuexit cpu_die(void)
 asmlinkage void __cpuinit secondary_start_kernel(void)
 {
 	struct mm_struct *mm = &init_mm;
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu;
+
+	cpu = smp_processor_id();
 
 	printk("CPU%u: Booted secondary processor\n", cpu);
@@ -273,6 +275,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	local_flush_tlb_all();
 
 	cpu_init();
+	preempt_disable();
 
 	/*
 	 * Give the platform a chance to do its own initialisation.
@@ -74,15 +74,13 @@ __setup("hlt", hlt_setup);
 void cpu_idle(void)
 {
 	/* endless idle loop with no priority at all */
-	preempt_disable();
 	while (1) {
-		while (!need_resched()) {
-			local_irq_disable();
-			if (!need_resched() && !hlt_counter)
-				local_irq_enable();
-		}
+		while (!need_resched())
+			cpu_relax();
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
 	}
-	schedule();
 }
 
 static char reboot_mode = 'h';
@@ -161,6 +161,7 @@ void __init smp_callin(void)
 	REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask);
 	unmask_irq(IPI_INTR_VECT);
 	unmask_irq(TIMER_INTR_VECT);
+	preempt_disable();
 	local_irq_enable();
 
 	cpu_set(cpu, cpu_online_map);
@@ -218,7 +218,9 @@ void cpu_idle (void)
 				idle = default_idle;
 			idle();
 		}
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 	}
 }
 
@@ -77,16 +77,20 @@ void (*idle)(void) = core_sleep_idle;
  */
 void cpu_idle(void)
 {
+	int cpu = smp_processor_id();
+
 	/* endless idle loop with no priority at all */
 	while (1) {
 		while (!need_resched()) {
-			irq_stat[smp_processor_id()].idle_timestamp = jiffies;
+			irq_stat[cpu].idle_timestamp = jiffies;
 
 			if (!frv_dma_inprogress && idle)
 				idle();
 		}
 
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 	}
 }
@@ -53,22 +53,18 @@ asmlinkage void ret_from_fork(void);
 #if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM)
 void default_idle(void)
 {
-	while(1) {
-		if (!need_resched()) {
-			local_irq_enable();
-			__asm__("sleep");
-			local_irq_disable();
-		}
-		schedule();
-	}
+	local_irq_disable();
+	if (!need_resched()) {
+		local_irq_enable();
+		/* XXX: race here! What if need_resched() gets set now? */
+		__asm__("sleep");
+	} else
+		local_irq_enable();
 }
 #else
 void default_idle(void)
 {
-	while(1) {
-		if (need_resched())
-			schedule();
-	}
+	cpu_relax();
 }
 #endif
 void (*idle)(void) = default_idle;
@@ -81,7 +77,13 @@ void (*idle)(void) = default_idle;
  */
 void cpu_idle(void)
 {
-	idle();
+	while (1) {
+		while (!need_resched())
+			idle();
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
+	}
 }
 
 void machine_restart(char * __unused)
@@ -179,7 +179,7 @@ static inline void play_dead(void)
  */
 void cpu_idle(void)
 {
-	int cpu = raw_smp_processor_id();
+	int cpu = smp_processor_id();
 
 	/* endless idle loop with no priority at all */
 	while (1) {
@@ -201,7 +201,9 @@ void cpu_idle(void)
 			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
 			idle();
 		}
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 	}
 }
 
@@ -485,6 +485,7 @@ static void __devinit start_secondary(void *unused)
 	 * things done here to the most necessary things.
 	 */
 	cpu_init();
+	preempt_disable();
 	smp_callin();
 	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
 		rep_nop();
@@ -292,7 +292,9 @@ cpu_idle (void)
 #ifdef CONFIG_SMP
 		normal_xtp();
 #endif
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 		check_pgt_cache();
 		if (cpu_is_offline(smp_processor_id()))
 			play_dead();
@@ -399,6 +399,7 @@ start_secondary (void *unused)
 	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
 	efi_map_pal_code();
 	cpu_init();
+	preempt_disable();
 	smp_callin();
 
 	cpu_idle();
@@ -104,7 +104,9 @@ void cpu_idle (void)
 
 			idle();
 		}
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 	}
 }
 
@@ -426,6 +426,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 int __init start_secondary(void *unused)
 {
 	cpu_init();
+	preempt_disable();
 	smp_callin();
 	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
 		cpu_relax();
@@ -102,7 +102,9 @@ void cpu_idle(void)
 	while (1) {
 		while (!need_resched())
 			idle();
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 	}
 }
 
@@ -52,7 +52,9 @@ ATTRIB_NORET void cpu_idle(void)
 		while (!need_resched())
 			if (cpu_wait)
 				(*cpu_wait)();
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 	}
 }
 
@@ -82,7 +82,7 @@ extern ATTRIB_NORET void cpu_idle(void);
  */
 asmlinkage void start_secondary(void)
 {
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu;
 
 	cpu_probe();
 	cpu_report();
@@ -95,6 +95,8 @@ asmlinkage void start_secondary(void)
 	 */
 
 	calibrate_delay();
+	preempt_disable();
+	cpu = smp_processor_id();
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
 
 	prom_smp_finish();
@@ -92,7 +92,9 @@ void cpu_idle(void)
 	while (1) {
 		while (!need_resched())
 			barrier();
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 		check_pgt_cache();
 	}
 }
@@ -463,6 +463,7 @@ void __init smp_callin(void)
 #endif
 
 	smp_cpu_init(slave_id);
+	preempt_disable();
 
 #if 0 /* NOT WORKING YET - see entry.S */
 	istack = (void *)__get_free_pages(GFP_KERNEL,ISTACK_ORDER);
@@ -694,7 +694,9 @@ static void iseries_shared_idle(void)
 		if (hvlpevent_is_pending())
 			process_iSeries_events();
 
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 	}
 }
 
@@ -726,7 +728,9 @@ static void iseries_dedicated_idle(void)
 		}
 
 		ppc64_runlatch_on();
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 	}
 }
 
@@ -539,7 +539,9 @@ static void pseries_dedicated_idle(void)
 		lpaca->lppaca.idle = 0;
 		ppc64_runlatch_on();
 
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 
 		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
 			cpu_die();
@@ -583,7 +585,9 @@ static void pseries_shared_idle(void)
 		lpaca->lppaca.idle = 0;
 		ppc64_runlatch_on();
 
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 
 		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
 			cpu_die();
@@ -53,10 +53,6 @@ void default_idle(void)
 		}
 #endif
 	}
-	if (need_resched())
-		schedule();
-	if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
-		cpu_die();
 }
 
 /*
@@ -64,11 +60,22 @@ void default_idle(void)
  */
 void cpu_idle(void)
 {
-	for (;;)
+	int cpu = smp_processor_id();
+
+	for (;;) {
 		if (ppc_md.idle != NULL)
 			ppc_md.idle();
 		else
 			default_idle();
+		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
+			cpu_die();
+		if (need_resched()) {
+			preempt_enable_no_resched();
+			schedule();
+			preempt_disable();
+		}
+
+	}
 }
 
 #if defined(CONFIG_SYSCTL) && defined(CONFIG_6xx)
@@ -341,6 +341,7 @@ int __devinit start_secondary(void *unused)
 	cpu = smp_processor_id();
 	smp_store_cpu_info(cpu);
 	set_dec(tb_ticks_per_jiffy);
+	preempt_disable();
 	cpu_callin_map[cpu] = 1;
 
 	printk("CPU %d done callin...\n", cpu);
@@ -61,7 +61,9 @@ void default_idle(void)
 		}
 
 		ppc64_runlatch_on();
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
 			cpu_die();
 	}
@@ -77,7 +79,9 @@ void native_idle(void)
 
 		if (need_resched()) {
 			ppc64_runlatch_on();
+			preempt_enable_no_resched();
 			schedule();
+			preempt_disable();
 		}
 
 		if (cpu_is_offline(smp_processor_id()) &&
@@ -102,7 +102,6 @@ void default_idle(void)
 	local_irq_disable();
 	if (need_resched()) {
 		local_irq_enable();
-		schedule();
 		return;
 	}
 
@@ -139,8 +138,14 @@ void default_idle(void)
 
 void cpu_idle(void)
 {
-	for (;;)
-		default_idle();
+	for (;;) {
+		while (!need_resched())
+			default_idle();
+
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
+	}
 }
 
 void show_regs(struct pt_regs *regs)
@@ -533,6 +533,7 @@ int __devinit start_secondary(void *cpuvoid)
 {
 	/* Setup the cpu */
 	cpu_init();
+	preempt_disable();
 	/* init per CPU timer */
 	init_cpu_timer();
 #ifdef CONFIG_VIRT_TIMER
@@ -64,7 +64,9 @@ void default_idle(void)
 			cpu_sleep();
 		}
 
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 	}
 }
 
@@ -112,7 +112,9 @@ int __cpu_up(unsigned int cpu)
 
 int start_secondary(void *unused)
 {
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu;
+
+	cpu = smp_processor_id();
 
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
@@ -120,6 +122,7 @@ int start_secondary(void *unused)
 	smp_store_cpu_info(cpu);
 
 	__smp_slave_init(cpu);
+	preempt_disable();
 	per_cpu_trap_init();
 
 	atomic_inc(&cpus_booted);
@@ -334,7 +334,9 @@ void default_idle(void)
 			}
 			local_irq_enable();
 		}
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 	}
 }
 
@@ -120,7 +120,9 @@ void cpu_idle(void)
 				(*pm_idle)();
 		}
 
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 		check_pgt_cache();
 	}
 }
@@ -133,7 +135,9 @@ void cpu_idle(void)
 	/* endless idle loop with no priority at all */
 	while(1) {
 		if(need_resched()) {
+			preempt_enable_no_resched();
 			schedule();
+			preempt_disable();
 			check_pgt_cache();
 		}
 		barrier(); /* or else gcc optimizes... */
@@ -74,7 +74,9 @@ void cpu_idle(void)
 		while (!need_resched())
 			barrier();
 
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 		check_pgt_cache();
 	}
 }
@@ -93,7 +95,9 @@ void cpu_idle(void)
 		if (need_resched()) {
 			unidle_me();
 			clear_thread_flag(TIF_POLLING_NRFLAG);
+			preempt_enable_no_resched();
 			schedule();
+			preempt_disable();
 			set_thread_flag(TIF_POLLING_NRFLAG);
 			check_pgt_cache();
 		}
@@ -168,6 +168,9 @@ void __init smp_callin(void)
 	rmb();
 
 	cpu_set(cpuid, cpu_online_map);
+
+	/* idle thread is expected to have preempt disabled */
+	preempt_disable();
 }
 
 void cpu_panic(void)
@@ -36,11 +36,8 @@ extern void ret_from_fork (void);
 /* The idle loop. */
 void default_idle (void)
 {
-	while (1) {
-		while (! need_resched ())
-			asm ("halt; nop; nop; nop; nop; nop" ::: "cc");
-		schedule ();
-	}
+	while (! need_resched ())
+		asm ("halt; nop; nop; nop; nop; nop" ::: "cc");
 }
 
 void (*idle)(void) = default_idle;
@@ -54,7 +51,14 @@ void (*idle)(void) = default_idle;
 void cpu_idle (void)
 {
 	/* endless idle loop with no priority at all */
-	(*idle) ();
+	while (1) {
+		while (!need_resched())
+			(*idle) ();
+
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
+	}
 }
 
 /*
@@ -204,7 +204,9 @@ void cpu_idle (void)
 			idle();
 		}
 
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 	}
 }
 
@@ -472,6 +472,7 @@ void __cpuinit start_secondary(void)
 	 * things done here to the most necessary things.
 	 */
 	cpu_init();
+	preempt_disable();
 	smp_callin();
 
 	/* otherwise gcc will move up the smp_processor_id before the cpu_init */
@@ -96,8 +96,9 @@ void cpu_idle(void)
 	while (1) {
 		while (!need_resched())
 			platform_idle();
-		preempt_enable();
+		preempt_enable_no_resched();
 		schedule();
+		preempt_disable();
 	}
 }
 
@@ -394,14 +394,16 @@ static void noinline rest_init(void)
 	kernel_thread(init, NULL, CLONE_FS | CLONE_SIGHAND);
 	numa_default_policy();
 	unlock_kernel();
-	preempt_enable_no_resched();
 
 	/*
 	 * The boot idle thread must execute schedule()
 	 * at least one to get things moving:
 	 */
+	preempt_enable_no_resched();
 	schedule();
+	preempt_disable();
 
+	/* Call into cpu_idle with preempt disabled */
 	cpu_idle();
 }
 