sched: fair scheduler should not resched rt tasks
With use of ftrace Steven noticed that some RT tasks got rescheduled due to sched_fair interaction. What happens is that we reprogram the hrtick from enqueue/dequeue_fair_task() because that can change nr_running, and thus a current task's ideal runtime. However, it's possible the current task isn't a fair_sched_class task, and thus doesn't have an hrtick set to change. Fix this by wrapping those hrtick_start_fair() calls in a hrtick_update() function, which will check for the right conditions. Reported-by: Steven Rostedt <srostedt@redhat.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Acked-by: Steven Rostedt <srostedt@redhat.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
ffda12a17a
commit
a4c2f00f5c
|
@ -73,6 +73,8 @@ unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
|
|||
|
||||
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
|
||||
|
||||
static const struct sched_class fair_sched_class;
|
||||
|
||||
/**************************************************************
|
||||
* CFS operations on generic schedulable entities:
|
||||
*/
|
||||
|
@ -848,11 +850,31 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
|
|||
hrtick_start(rq, delta);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* called from enqueue/dequeue and updates the hrtick when the
|
||||
* current task is from our class and nr_running is low enough
|
||||
* to matter.
|
||||
*/
|
||||
static void hrtick_update(struct rq *rq)
|
||||
{
|
||||
struct task_struct *curr = rq->curr;
|
||||
|
||||
if (curr->sched_class != &fair_sched_class)
|
||||
return;
|
||||
|
||||
if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
|
||||
hrtick_start_fair(rq, curr);
|
||||
}
|
||||
#else /* !CONFIG_SCHED_HRTICK */
|
||||
/* !CONFIG_SCHED_HRTICK: there is no high-resolution tick to program. */
static inline void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}
|
||||
|
||||
/* !CONFIG_SCHED_HRTICK: enqueue/dequeue have no hrtick state to refresh. */
static inline void hrtick_update(struct rq *rq)
{
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -873,7 +895,7 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
|
|||
wakeup = 1;
|
||||
}
|
||||
|
||||
hrtick_start_fair(rq, rq->curr);
|
||||
hrtick_update(rq);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -895,7 +917,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
|
|||
sleep = 1;
|
||||
}
|
||||
|
||||
hrtick_start_fair(rq, rq->curr);
|
||||
hrtick_update(rq);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1001,8 +1023,6 @@ static inline int wake_idle(int cpu, struct task_struct *p)
|
|||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
static const struct sched_class fair_sched_class;
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
/*
|
||||
* effective_load() calculates the load change as seen from the root_task_group
|
||||
|
|
Loading…
Reference in New Issue