sched: Change get_rq_runnable_load() to static and inline

Based-on-patch-by: Fengguang Wu <fengguang.wu@intel.com>
Signed-off-by: Alex Shi <alex.shi@intel.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1371694737-29336-14-git-send-email-alex.shi@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit a9dc5d0e33
parent a9cef46a10
Author:    Alex Shi <alex.shi@intel.com>
Date:      2013-06-20 10:18:57 +08:00
Committer: Ingo Molnar <mingo@kernel.org>
1 changed file with 2 additions and 2 deletions


@@ -502,12 +502,12 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
 }
 
 #ifdef CONFIG_SMP
-unsigned long get_rq_runnable_load(struct rq *rq)
+static inline unsigned long get_rq_runnable_load(struct rq *rq)
 {
 	return rq->cfs.runnable_load_avg;
 }
 #else
-unsigned long get_rq_runnable_load(struct rq *rq)
+static inline unsigned long get_rq_runnable_load(struct rq *rq)
 {
 	return rq->load.weight;
 }
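
For readers outside the kernel tree, a minimal, self-contained sketch of the pattern follows. It is not the kernel source: the struct layouts, the CONFIG_SMP define, and the report_load() caller are invented stand-ins for illustration. The point is that a one-line accessor used only within a single file gains nothing from external linkage; declared static inline, the compiler can fold each call into a direct field load and emits no global symbol.

#include <stdio.h>

/* Simplified stand-ins for the kernel's runqueue structures. */
struct cfs_rq { unsigned long runnable_load_avg; };
struct rq {
	struct cfs_rq cfs;
	struct { unsigned long weight; } load;
};

/* Stand-in for the kernel config option selected at build time. */
#define CONFIG_SMP 1

#ifdef CONFIG_SMP
/* As a static inline, each call site below compiles to a direct
 * field load and no external symbol is emitted for the accessor. */
static inline unsigned long get_rq_runnable_load(struct rq *rq)
{
	return rq->cfs.runnable_load_avg;
}
#else
static inline unsigned long get_rq_runnable_load(struct rq *rq)
{
	return rq->load.weight;
}
#endif

/* Hypothetical caller standing in for the real users in the same file. */
static void report_load(struct rq *rq)
{
	printf("runnable load: %lu\n", get_rq_runnable_load(rq));
}

int main(void)
{
	struct rq rq = { .cfs = { .runnable_load_avg = 42 },
			 .load = { .weight = 1024 } };
	report_load(&rq);
	return 0;
}

Built with typical optimization levels (e.g. gcc -O2), inspecting the assembly should show the accessor folded into report_load() with no standalone get_rq_runnable_load symbol.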