sched: Revert commit 4c6c4e38c4 ("sched/core: Fix endless loop in pick_next_task()")

This reverts commit 4c6c4e38c4 ("sched/core: Fix endless loop in
pick_next_task()"), which is no longer necessary after commit f4ebcbc0d7
("sched/rt: Substract number of tasks of throttled queues from
rq->nr_running").
Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
[conflict resolution with stop task checking patch]
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1394835307.18748.34.camel@HP-250-G1-Notebook-PC
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 46383648b3
parent f4ebcbc0d7
@@ -6732,10 +6732,7 @@ static int idle_balance(struct rq *this_rq)
 out:
 	/* Is there a task of a high priority class? */
-	if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
-	    ((this_rq->stop && this_rq->stop->on_rq) ||
-	     this_rq->dl.dl_nr_running ||
-	     (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt))))
+	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
 		pulled_task = -1;
 
 	if (pulled_task) {
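A note on why the simpler check in the idle_balance() hunk above is now sufficient: since commit f4ebcbc0d7 ("sched/rt: Substract number of tasks of throttled queues from rq->nr_running"), tasks sitting on a throttled RT runqueue are no longer counted in rq->nr_running, so any difference between rq->nr_running and cfs.h_nr_running can only come from a runnable deadline, RT or stop task that pick_next_task() will actually pick. The toy program below is a minimal sketch of that invariant; the struct and its fields are hypothetical stand-ins for the kernel's runqueue accounting, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the relevant rq counters (hypothetical, not the kernel struct). */
struct toy_rq {
	unsigned int nr_running;	/* runnable tasks counted on the rq */
	unsigned int cfs_h_nr_running;	/* runnable CFS tasks only */
};

/*
 * With throttled RT tasks subtracted from nr_running, this simple
 * comparison is enough to detect a pickable higher-class task.
 */
static bool higher_class_task_present(const struct toy_rq *rq)
{
	return rq->nr_running != rq->cfs_h_nr_running;
}

int main(void)
{
	struct toy_rq rq = { .nr_running = 3, .cfs_h_nr_running = 3 };

	/* Only CFS tasks runnable: keep any task pulled by idle_balance(). */
	printf("%d\n", higher_class_task_present(&rq));	/* 0 */

	/* A non-throttled RT (or DL/stop) task shows up: nr_running rises,
	 * cfs.h_nr_running does not, so idle_balance() must bail out. */
	rq.nr_running++;
	printf("%d\n", higher_class_task_present(&rq));	/* 1 */

	return 0;
}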
@@ -493,6 +493,11 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 		dequeue_rt_entity(rt_se);
 }
 
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
+}
+
 static int rt_se_boosted(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
@@ -569,6 +574,11 @@ static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 	dequeue_top_rt_rq(rt_rq);
 }
 
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled;
+}
+
 static inline const struct cpumask *sched_rt_period_mask(void)
 {
 	return cpu_online_mask;
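For reference, the two variants being moved back into kernel/sched/rt.c above differ only in how priority boosting is handled: with RT group scheduling, a throttled group that still holds boosted tasks (rt_nr_boosted, e.g. via rt-mutex priority inheritance) must not be treated as throttled. A minimal sketch of that distinction, using a hypothetical stand-in struct rather than the kernel's rt_rq:

#include <stdbool.h>

/* Hypothetical stand-in for the fields rt_rq_throttled() looks at. */
struct toy_rt_rq {
	bool rt_throttled;		/* bandwidth limit hit */
	unsigned long rt_nr_boosted;	/* tasks boosted by rt-mutex PI */
};

/* CONFIG_RT_GROUP_SCHED flavour: boosted tasks keep the queue runnable. */
static inline bool toy_rt_rq_throttled_grouped(const struct toy_rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

/* !CONFIG_RT_GROUP_SCHED flavour: throttling alone decides. */
static inline bool toy_rt_rq_throttled_plain(const struct toy_rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}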
@@ -425,18 +425,6 @@ struct rt_rq {
 #endif
 };
 
-#ifdef CONFIG_RT_GROUP_SCHED
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
-}
-#else
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-	return rt_rq->rt_throttled;
-}
-#endif
-
 /* Deadline class' related fields in a runqueue */
 struct dl_rq {
 	/* runqueue is an rbtree, ordered by deadline */