sched: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW
Now that the last architecture to use this has stopped doing so (ARM, thanks Catalin!) we can remove this complexity from the scheduler core.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Link: http://lkml.kernel.org/n/tip-g9p2a1w81xxbrze25v9zpzbf@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit f3e9478674
parent 5ed4f1d96d
@@ -17,16 +17,6 @@ you must `#define __ARCH_WANT_UNLOCKED_CTXSW` in a header file
 Unlocked context switches introduce only a very minor performance
 penalty to the core scheduler implementation in the CONFIG_SMP case.
 
-2. Interrupt status
-By default, the switch_to arch function is called with interrupts
-disabled. Interrupts may be enabled over the call if it is likely to
-introduce a significant interrupt latency by adding the line
-`#define __ARCH_WANT_INTERRUPTS_ON_CTXSW` in the same place as for
-unlocked context switches. This define also implies
-`__ARCH_WANT_UNLOCKED_CTXSW`. See arch/arm/include/asm/system.h for an
-example.
-
-
 CPU idle
 ========
 Your cpu_idle routines need to obey the following rules:
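
Note: the documentation removed above described an opt-in that an architecture made in its own headers. A minimal sketch of what that opt-in looked like before this commit, for context only (the comments are mine; the arm header named in the removed text was the in-tree example):

/* illustrative arch header, pre-removal (cf. arch/arm/include/asm/system.h) */
#define __ARCH_WANT_UNLOCKED_CTXSW        /* run switch_to() with rq->lock dropped */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW   /* additionally run switch_to() with IRQs
                                             enabled; implies the define above */
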
@@ -678,11 +678,6 @@ struct signal_struct {
                                          * (notably. ptrace) */
 };
 
-/* Context switch must be unlocked if interrupts are to be enabled */
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-# define __ARCH_WANT_UNLOCKED_CTXSW
-#endif
-
 /*
  * Bits in flags field of signal_struct.
  */
@@ -1280,11 +1280,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
         p->irq_events = 0;
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-        p->hardirqs_enabled = 1;
-#else
         p->hardirqs_enabled = 0;
-#endif
         p->hardirq_enable_ip = 0;
         p->hardirq_enable_event = 0;
         p->hardirq_disable_ip = _THIS_IP_;
@@ -1361,25 +1361,6 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
                 smp_send_reschedule(cpu);
 }
 
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
-{
-        struct rq *rq;
-        int ret = 0;
-
-        rq = __task_rq_lock(p);
-        if (p->on_cpu) {
-                ttwu_activate(rq, p, ENQUEUE_WAKEUP);
-                ttwu_do_wakeup(rq, p, wake_flags);
-                ret = 1;
-        }
-        __task_rq_unlock(rq);
-
-        return ret;
-
-}
-#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-
 bool cpus_share_cache(int this_cpu, int that_cpu)
 {
         return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
@@ -1440,21 +1421,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
          * If the owning (remote) cpu is still in the middle of schedule() with
          * this task as prev, wait until its done referencing the task.
          */
-        while (p->on_cpu) {
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-                /*
-                 * In case the architecture enables interrupts in
-                 * context_switch(), we cannot busy wait, since that
-                 * would lead to deadlocks when an interrupt hits and
-                 * tries to wake up @prev. So bail and do a complete
-                 * remote wakeup.
-                 */
-                if (ttwu_activate_remote(p, wake_flags))
-                        goto stat;
-#else
+        while (p->on_cpu)
                 cpu_relax();
-#endif
-        }
         /*
          * Pairs with the smp_wmb() in finish_lock_switch().
          */
@@ -1798,13 +1766,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
         prev_state = prev->state;
         account_switch_vtime(prev);
         finish_arch_switch(prev);
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-        local_irq_disable();
-#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
         perf_event_task_sched_in(prev, current);
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-        local_irq_enable();
-#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
         finish_lock_switch(rq, prev);
         finish_arch_post_lock_switch();
 
@@ -1632,11 +1632,6 @@ static int push_rt_task(struct rq *rq)
         if (!next_task)
                 return 0;
 
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-        if (unlikely(task_running(rq, next_task)))
-                return 0;
-#endif
-
 retry:
         if (unlikely(next_task == rq->curr)) {
                 WARN_ON(1);
@@ -737,11 +737,7 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
          */
         next->on_cpu = 1;
 #endif
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-        raw_spin_unlock_irq(&rq->lock);
-#else
         raw_spin_unlock(&rq->lock);
-#endif
 }
 
 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
@@ -755,9 +751,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
         smp_wmb();
         prev->on_cpu = 0;
 #endif
-#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
         local_irq_enable();
-#endif
 }
 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 
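
Taken together, the last two hunks (which, going by prepare_lock_switch()/finish_lock_switch(), appear to be the __ARCH_WANT_UNLOCKED_CTXSW helpers in the scheduler's private header) leave the unlocked-switch path looking roughly like the sketch below. It is reconstructed from the kept lines above; the CONFIG_SMP guards are assumed from the visible #endif lines and the surrounding kernel context is omitted, so treat it as illustrative rather than the literal file contents.

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
        next->on_cpu = 1;               /* mark next as running before dropping the lock */
#endif
        raw_spin_unlock(&rq->lock);     /* always a plain unlock: IRQs stay off across switch_to() */
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
        smp_wmb();                      /* order the completed switch against clearing ->on_cpu */
        prev->on_cpu = 0;
#endif
        local_irq_enable();             /* unconditional: no __ARCH_WANT_INTERRUPTS_ON_CTXSW case remains */
}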