sched: Pull resched loop to __schedule() callers
__schedule() disables preemption during its job and re-enables it afterward without doing a preemption check, to avoid recursion.

But if an event happens after the context switch which requires rescheduling, we need to check again whether a task of higher priority needs the CPU. A preempt irq can raise such a situation. To handle that, __schedule() loops on need_resched().

But the preempt_schedule_*() functions, which call __schedule(), also loop on need_resched() to handle missed preempt irqs. Hence we end up with the same loop happening twice.

Let's simplify that by making all __schedule() callers responsible for the need_resched() loop.

There is a risk that the outer loop now handles reschedules that used to be handled by the inner loop, with the added overhead of caller details (inc/dec of PREEMPT_ACTIVE, irq save/restore), but assuming those inner rescheduling loops weren't too frequent, this shouldn't matter. Especially since the whole preemption path is now losing one loop in any case.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/1422404652-29067-2-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
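[Editor's note: for context, the caller-side loop the changelog refers to already existed in preempt_schedule() at the time. A simplified sketch of that pattern, paraphrased from kernels of that era and not part of this patch's diff:]

    /*
     * Sketch of the caller-side resched loop, paraphrased from
     * preempt_schedule() circa v3.19. Simplified for illustration;
     * not part of this commit's diff.
     */
    asmlinkage __visible void __sched notrace preempt_schedule(void)
    {
            /* Bail out if preemption is currently not allowed. */
            if (likely(!preemptible()))
                    return;

            do {
                    /*
                     * PREEMPT_ACTIVE marks this as an involuntary
                     * preemption so __schedule() doesn't recurse via
                     * preempt_enable().
                     */
                    __preempt_count_add(PREEMPT_ACTIVE);
                    __schedule();
                    __preempt_count_sub(PREEMPT_ACTIVE);

                    /*
                     * Re-check in case an interrupt set need_resched()
                     * while preemption was disabled in __schedule().
                     */
                    barrier();
            } while (need_resched());
    }

[With this patch, the do/while above becomes the only resched loop; the duplicate loop inside __schedule() is removed, as the diff below shows.]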
parent 9659e1eeee
commit bfd9b2b5f8
kernel/sched/core.c

@@ -2765,6 +2765,10 @@ again:
  *          - explicit schedule() call
  *          - return from syscall or exception to user-space
  *          - return from interrupt-handler to user-space
+ *
+ * WARNING: all callers must re-check need_resched() afterward and reschedule
+ * accordingly in case an event triggered the need for rescheduling (such as
+ * an interrupt waking up a task) while preemption was disabled in __schedule().
  */
 static void __sched __schedule(void)
 {
@@ -2773,7 +2777,6 @@ static void __sched __schedule(void)
 	struct rq *rq;
 	int cpu;
 
-need_resched:
 	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
@@ -2840,8 +2843,6 @@ need_resched:
 	post_schedule(rq);
 
 	sched_preempt_enable_no_resched();
-	if (need_resched())
-		goto need_resched;
 }
 
 static inline void sched_submit_work(struct task_struct *tsk)
@@ -2861,7 +2862,9 @@ asmlinkage __visible void __sched schedule(void)
 	struct task_struct *tsk = current;
 
 	sched_submit_work(tsk);
-	__schedule();
+	do {
+		__schedule();
+	} while (need_resched());
 }
 EXPORT_SYMBOL(schedule);
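[Editor's note: the "irq save/restore" caller detail mentioned in the changelog refers to the interrupt-return preemption path. A simplified sketch of preempt_schedule_irq() from the same era, with context-tracking calls omitted; paraphrased, not part of this diff:]

    /*
     * Sketch of the irq-return preemption path, paraphrased from
     * preempt_schedule_irq() circa v3.19 (exception_enter/exit
     * omitted). Each pass of the outer loop must re-enable and
     * re-disable interrupts around __schedule(): this is the
     * "irq save/restore" overhead the changelog weighs.
     */
    asmlinkage __visible void __sched preempt_schedule_irq(void)
    {
            /* This path is entered with interrupts disabled. */
            BUG_ON(preempt_count() || !irqs_disabled());

            do {
                    __preempt_count_add(PREEMPT_ACTIVE);
                    local_irq_enable();
                    __schedule();
                    local_irq_disable();
                    __preempt_count_sub(PREEMPT_ACTIVE);

                    /* Re-check for a preemption request missed meanwhile. */
                    barrier();
            } while (need_resched());
    }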