sched: sched.h: make rq locking and clock functions available in stats.h
kernel/sched/sched.h includes "stats.h" half-way through the file. The
next patch introduces users of sched.h's rq locking functions and
update_rq_clock() in kernel/sched/stats.h. Move those definitions up in
the file so they are available in stats.h.

Link: http://lkml.kernel.org/r/20180828172258.3185-7-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Suren Baghdasaryan <surenb@google.com>
Tested-by: Daniel Drake <drake@endlessm.com>
Cc: Christopher Lameter <cl@linux.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Johannes Weiner <jweiner@fb.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Enderborg <peter.enderborg@sony.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vinayak Menon <vinmenon@codeaurora.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5c54f5b9ed
commit 1f351d7f75
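Why the move is needed comes down to ordinary C textual inclusion: the preprocessor pastes stats.h into sched.h at the #include line, so any helper that code in stats.h calls must already be declared or defined above that point. A minimal sketch of the resulting layout, for orientation only (the file name is real, the outline is illustrative, not the actual file contents):

        /* kernel/sched/sched.h -- illustrative outline only */

        extern void update_rq_clock(struct rq *rq);     /* moved above the include */

        static inline void rq_lock_irq(struct rq *rq, struct rq_flags *rf)
        { /* ... definition moved above the include ... */ }

        #include "stats.h"      /* stats.h is pasted in here, so it can now
                                 * call update_rq_clock(), rq_lock_irq(), etc. */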
@@ -957,6 +957,8 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 #define cpu_curr(cpu)          (cpu_rq(cpu)->curr)
 #define raw_rq()               raw_cpu_ptr(&runqueues)
 
+extern void update_rq_clock(struct rq *rq);
+
 static inline u64 __rq_clock_broken(struct rq *rq)
 {
        return READ_ONCE(rq->clock);
@@ -1075,6 +1077,86 @@ static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
 #endif
 }
 
+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+       __acquires(rq->lock);
+
+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+       __acquires(p->pi_lock)
+       __acquires(rq->lock);
+
+static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
+       __releases(rq->lock)
+{
+       rq_unpin_lock(rq, rf);
+       raw_spin_unlock(&rq->lock);
+}
+
+static inline void
+task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+       __releases(rq->lock)
+       __releases(p->pi_lock)
+{
+       rq_unpin_lock(rq, rf);
+       raw_spin_unlock(&rq->lock);
+       raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
+}
+
+static inline void
+rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
+       __acquires(rq->lock)
+{
+       raw_spin_lock_irqsave(&rq->lock, rf->flags);
+       rq_pin_lock(rq, rf);
+}
+
+static inline void
+rq_lock_irq(struct rq *rq, struct rq_flags *rf)
+       __acquires(rq->lock)
+{
+       raw_spin_lock_irq(&rq->lock);
+       rq_pin_lock(rq, rf);
+}
+
+static inline void
+rq_lock(struct rq *rq, struct rq_flags *rf)
+       __acquires(rq->lock)
+{
+       raw_spin_lock(&rq->lock);
+       rq_pin_lock(rq, rf);
+}
+
+static inline void
+rq_relock(struct rq *rq, struct rq_flags *rf)
+       __acquires(rq->lock)
+{
+       raw_spin_lock(&rq->lock);
+       rq_repin_lock(rq, rf);
+}
+
+static inline void
+rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
+       __releases(rq->lock)
+{
+       rq_unpin_lock(rq, rf);
+       raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
+}
+
+static inline void
+rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
+       __releases(rq->lock)
+{
+       rq_unpin_lock(rq, rf);
+       raw_spin_unlock_irq(&rq->lock);
+}
+
+static inline void
+rq_unlock(struct rq *rq, struct rq_flags *rf)
+       __releases(rq->lock)
+{
+       rq_unpin_lock(rq, rf);
+       raw_spin_unlock(&rq->lock);
+}
+
 #ifdef CONFIG_NUMA
 enum numa_topology_type {
        NUMA_DIRECT,
@@ -1717,8 +1799,6 @@ static inline void sub_nr_running(struct rq *rq, unsigned count)
        sched_update_tick_dependency(rq);
 }
 
-extern void update_rq_clock(struct rq *rq);
-
 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
 
@@ -1783,86 +1863,6 @@ unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
 #endif
 #endif
 
-struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-       __acquires(rq->lock);
-
-struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-       __acquires(p->pi_lock)
-       __acquires(rq->lock);
-
-static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
-       __releases(rq->lock)
-{
-       rq_unpin_lock(rq, rf);
-       raw_spin_unlock(&rq->lock);
-}
-
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
-       __releases(rq->lock)
-       __releases(p->pi_lock)
-{
-       rq_unpin_lock(rq, rf);
-       raw_spin_unlock(&rq->lock);
-       raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
-}
-
-static inline void
-rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
-       __acquires(rq->lock)
-{
-       raw_spin_lock_irqsave(&rq->lock, rf->flags);
-       rq_pin_lock(rq, rf);
-}
-
-static inline void
-rq_lock_irq(struct rq *rq, struct rq_flags *rf)
-       __acquires(rq->lock)
-{
-       raw_spin_lock_irq(&rq->lock);
-       rq_pin_lock(rq, rf);
-}
-
-static inline void
-rq_lock(struct rq *rq, struct rq_flags *rf)
-       __acquires(rq->lock)
-{
-       raw_spin_lock(&rq->lock);
-       rq_pin_lock(rq, rf);
-}
-
-static inline void
-rq_relock(struct rq *rq, struct rq_flags *rf)
-       __acquires(rq->lock)
-{
-       raw_spin_lock(&rq->lock);
-       rq_repin_lock(rq, rf);
-}
-
-static inline void
-rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
-       __releases(rq->lock)
-{
-       rq_unpin_lock(rq, rf);
-       raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
-}
-
-static inline void
-rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
-       __releases(rq->lock)
-{
-       rq_unpin_lock(rq, rf);
-       raw_spin_unlock_irq(&rq->lock);
-}
-
-static inline void
-rq_unlock(struct rq *rq, struct rq_flags *rf)
-       __releases(rq->lock)
-{
-       rq_unpin_lock(rq, rf);
-       raw_spin_unlock(&rq->lock);
-}
-
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PREEMPT
 
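For readers outside the scheduler, the moved helpers pair each raw rq->lock spinlock operation with lockdep pin/unpin bookkeeping, and task_rq_lock() additionally stabilizes the task's runqueue assignment via p->pi_lock. A hedged sketch of how a stats.h user, such as the one the next patch introduces, would typically call them; example_stat_update() is a made-up name for illustration, while the locking and clock calls are the real helpers moved by this patch:

        /* example_stat_update() is hypothetical; the helpers are real. */
        static void example_stat_update(struct task_struct *p)
        {
                struct rq_flags rf;
                struct rq *rq;

                rq = task_rq_lock(p, &rf);      /* acquires p->pi_lock and rq->lock */
                update_rq_clock(rq);            /* rq->clock is only valid under rq->lock */
                /* ... sample or update per-runqueue state here ... */
                task_rq_unlock(rq, p, &rf);     /* unpins rq, releases both locks */
        }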