sched: Always provide p->on_cpu
Always provide p->on_cpu so that we can determine if it's on a cpu
without having to lock the rq.

Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110405152728.785452014@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 184748cc50
commit 3ca7a440da
include/linux/sched.h
@@ -1200,9 +1200,7 @@ struct task_struct {
 	int lock_depth;		/* BKL lock depth */
 
 #ifdef CONFIG_SMP
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
-	int oncpu;
-#endif
+	int on_cpu;
 #endif
 
 	int prio, static_prio, normal_prio;
kernel/sched.c
@@ -838,18 +838,39 @@ static inline int task_current(struct rq *rq, struct task_struct *p)
 	return rq->curr == p;
 }
 
+static inline int task_running(struct rq *rq, struct task_struct *p)
+{
+#ifdef CONFIG_SMP
+	return p->on_cpu;
+#else
+	return task_current(rq, p);
+#endif
+}
+
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
-static inline int task_running(struct rq *rq, struct task_struct *p)
-{
-	return task_current(rq, p);
-}
-
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
+#ifdef CONFIG_SMP
+	/*
+	 * We can optimise this out completely for !SMP, because the
+	 * SMP rebalancing from interrupt is the only thing that cares
+	 * here.
+	 */
+	next->on_cpu = 1;
+#endif
 }
 
 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
+#ifdef CONFIG_SMP
+	/*
+	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
+	 * We must ensure this doesn't happen until the switch is completely
+	 * finished.
+	 */
+	smp_wmb();
+	prev->on_cpu = 0;
+#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
 	rq->lock.owner = current;
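With task_running() now reading p->on_cpu on SMP, the flag's lifetime around a context switch is worth spelling out. A condensed sketch of the call sequence (the hook names are as in the hunk above; the surrounding context_switch() is paraphrased from the scheduler, not quoted from this patch):

/*
 * context_switch(rq, prev, next), condensed:
 *
 *	prepare_lock_switch(rq, next);	// next->on_cpu = 1
 *	switch_to(prev, next, prev);	// the CPU actually switches
 *	finish_lock_switch(rq, prev);	// smp_wmb(); prev->on_cpu = 0
 *
 * So p->on_cpu is 1 from the moment the scheduler commits to running
 * p until the switch away from p has completely finished, which is
 * exactly what task_running() now reports without rq->lock held.
 */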
@@ -865,15 +886,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(struct rq *rq, struct task_struct *p)
-{
-#ifdef CONFIG_SMP
-	return p->oncpu;
-#else
-	return task_current(rq, p);
-#endif
-}
-
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
 #ifdef CONFIG_SMP
@@ -882,7 +894,7 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 	 * SMP rebalancing from interrupt is the only thing that cares
 	 * here.
 	 */
-	next->oncpu = 1;
+	next->on_cpu = 1;
 #endif
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	raw_spin_unlock_irq(&rq->lock);
@@ -895,12 +907,12 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_SMP
 	/*
-	 * After ->oncpu is cleared, the task can be moved to a different CPU.
+	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
 	 * We must ensure this doesn't happen until the switch is completely
 	 * finished.
 	 */
 	smp_wmb();
-	prev->oncpu = 0;
+	prev->on_cpu = 0;
 #endif
 #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_enable();
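The smp_wmb() in finish_lock_switch() orders the stores that complete the context switch before the store that clears ->on_cpu. A sketch of the pairing this implies for a lockless reader (the writer side mirrors the hunks above; the reader side is a hypothetical remote path polling ->on_cpu, not code from this patch):

/* Writer -- finish_lock_switch(), as in the hunks above: */
	/* ... stores that finish the context switch ... */
	smp_wmb();		/* publish those stores first ... */
	prev->on_cpu = 0;	/* ... then announce "off cpu" */

/* Reader -- hypothetical remote path that needs the old CPU done: */
	while (p->on_cpu)
		cpu_relax();
	smp_rmb();		/* pairs with the smp_wmb() above */
	/* stores made before the smp_wmb() are now visible here */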
@@ -2686,8 +2698,8 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (likely(sched_info_on()))
 		memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
-	p->oncpu = 0;
+#if defined(CONFIG_SMP)
+	p->on_cpu = 0;
 #endif
 #ifdef CONFIG_PREEMPT
 	/* Want to start with kernel preemption disabled. */
@@ -5776,8 +5788,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	rcu_read_unlock();
 
 	rq->curr = rq->idle = idle;
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
-	idle->oncpu = 1;
+#if defined(CONFIG_SMP)
+	idle->on_cpu = 1;
 #endif
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
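Taken together, the sched_fork() and init_idle() hunks keep the flag consistent for every task from creation onward. The resulting invariant, sketched as comments (a summary of this patch, not new code):

/*
 * p->on_cpu invariant after this patch, under CONFIG_SMP:
 *
 *	sched_fork():		p->on_cpu = 0;	   not running yet
 *	init_idle():		idle->on_cpu = 1;  idle is running
 *	prepare_lock_switch():	next->on_cpu = 1;  about to run
 *	finish_lock_switch():	prev->on_cpu = 0;  after smp_wmb()
 *
 * i.e. the flag is maintained on every SMP configuration, not only
 * when the architecture selects __ARCH_WANT_UNLOCKED_CTXSW.
 */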