sched: Move update_load_*() methods from sched.h to fair.c
These inlines are only used by kernel/sched/fair.c, so they do not need
to be present in the main kernel/sched/sched.h file.

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/1366398650-31599-3-git-send-email-paul.gortmaker@windriver.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 8527632dc9
parent 45ceebf776
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -113,6 +113,24 @@ unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
 #endif
 
+static inline void update_load_add(struct load_weight *lw, unsigned long inc)
+{
+	lw->weight += inc;
+	lw->inv_weight = 0;
+}
+
+static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
+{
+	lw->weight -= dec;
+	lw->inv_weight = 0;
+}
+
+static inline void update_load_set(struct load_weight *lw, unsigned long w)
+{
+	lw->weight = w;
+	lw->inv_weight = 0;
+}
+
 /*
  * Increase the granularity value when there are more CPUs,
  * because with more CPUs the 'effective latency' as visible
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -892,24 +892,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 #define WF_FORK		0x02		/* child wakeup after fork */
 #define WF_MIGRATED	0x4		/* internal use, task got migrated */
 
-static inline void update_load_add(struct load_weight *lw, unsigned long inc)
-{
-	lw->weight += inc;
-	lw->inv_weight = 0;
-}
-
-static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
-{
-	lw->weight -= dec;
-	lw->inv_weight = 0;
-}
-
-static inline void update_load_set(struct load_weight *lw, unsigned long w)
-{
-	lw->weight = w;
-	lw->inv_weight = 0;
-}
-
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
  * of tasks with abnormal "nice" values across CPUs the contribution that
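For readers outside the kernel tree, the pattern these helpers implement is small enough to show standalone: every weight change zeroes inv_weight, the cached fixed-point inverse of weight, and consumers recompute it lazily on first use (in the kernel of this era that recomputation happens in calc_delta_mine()). The sketch below is a user-space illustration only; the WMULT_CONST value and the load_inv() helper are assumptions for demonstration, not the kernel's actual code.

/*
 * Minimal user-space sketch of the update_load_*() pattern above.
 * WMULT_CONST's value and load_inv() are illustrative assumptions,
 * not the kernel's implementation.
 */
#include <stdio.h>

#define WMULT_CONST	(1ULL << 32)	/* assumed fixed-point scale */

struct load_weight {
	unsigned long weight;
	unsigned long inv_weight;	/* cached inverse of weight; 0 means stale */
};

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;	/* any weight change invalidates the cache */
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/* Hypothetical consumer: recompute the inverse only when it is stale. */
static unsigned long load_inv(struct load_weight *lw)
{
	if (!lw->inv_weight && lw->weight)
		lw->inv_weight = (unsigned long)(WMULT_CONST / lw->weight);
	return lw->inv_weight;
}

int main(void)
{
	struct load_weight lw = { 0, 0 };

	update_load_set(&lw, 1024);	/* e.g. a nice-0 task's weight */
	printf("inv_weight = %lu\n", load_inv(&lw));

	update_load_add(&lw, 512);	/* another entity's weight enqueued */
	printf("inv_weight = %lu\n", load_inv(&lw));	/* recomputed */
	return 0;
}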