sched_rt: Fix overload bug on rt group scheduling
Fixes an easily triggerable BUG() when setting process affinities.

Make sure to count the number of migratable tasks in the same place:
the root rt_rq. Otherwise the number doesn't make sense and we'll hit
the BUG in set_cpus_allowed_rt().

Also, make sure we only count tasks, not groups (this is probably
already taken care of by the fact that rt_se->nr_cpus_allowed will be
0 for groups, but be more explicit).

Tested-by: Thomas Gleixner <tglx@linutronix.de>
CC: stable@kernel.org
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Gregory Haskins <ghaskins@novell.com>
LKML-Reference: <1247067476.9777.57.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit a1ba4d8ba9
parent d4ec36bac3
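The BUG() in question is reachable from an ordinary sched_setaffinity() call. As a rough userspace illustration only: the sketch below makes the calling task SCHED_FIFO and then pins it to a single CPU, the affinity-shrinking path the message refers to. It assumes root privileges, a CONFIG_RT_GROUP_SCHED kernel, and that the task already sits in an rt group; the priority value is arbitrary.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };	/* arbitrary RT prio */
	cpu_set_t one;

	/* Become an RT task; needs privileges and available RT runtime. */
	if (sched_setscheduler(0, SCHED_FIFO, &sp))
		perror("sched_setscheduler");

	/* Shrink affinity to a single CPU -- the path that hit the BUG(). */
	CPU_ZERO(&one);
	CPU_SET(0, &one);
	if (sched_setaffinity(0, sizeof(one), &one))
		perror("sched_setaffinity");

	return 0;
}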
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -493,6 +493,7 @@ struct rt_rq {
 #endif
 #ifdef CONFIG_SMP
 	unsigned long rt_nr_migratory;
+	unsigned long rt_nr_total;
 	int overloaded;
 	struct plist_head pushable_tasks;
 #endif
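The new rt_nr_total field counts every RT task on the runqueue, alongside rt_nr_migratory, which counts only those allowed on more than one CPU. A minimal standalone sketch of how the two relate (plain C for illustration, not the kernel code itself):

struct rt_counts {
	unsigned long rt_nr_migratory;	/* RT tasks allowed on more than one CPU */
	unsigned long rt_nr_total;	/* all RT tasks accounted on this rq */
};

/* Mirrors the condition update_rt_migration() tests after this patch:
 * overload needs more than one RT task, at least one of them movable. */
static inline int rt_should_overload(const struct rt_counts *c)
{
	return c->rt_nr_migratory && c->rt_nr_total > 1;
}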
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -10,6 +10,8 @@ static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 
 #ifdef CONFIG_RT_GROUP_SCHED
 
+#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
+
 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 {
 	return rt_rq->rq;
@@ -22,6 +24,8 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 
 #else /* CONFIG_RT_GROUP_SCHED */
 
+#define rt_entity_is_task(rt_se) (1)
+
 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 {
 	return container_of(rt_rq, struct rq, rt);
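Both definitions of rt_entity_is_task() hinge on ->my_q: under CONFIG_RT_GROUP_SCHED, an entity that represents a group owns a child runqueue through my_q, while a plain task's entity does not; without group scheduling every entity is a task, so the macro can collapse to (1). A simplified sketch of that shape, with the field set reduced for illustration:

struct rt_rq;					/* opaque here */

struct sched_rt_entity_sketch {
	int nr_cpus_allowed;
	struct rt_rq *my_q;	/* NULL for a task, the group's own rt_rq for a group */
};

#define rt_entity_is_task(rt_se)	(!(rt_se)->my_q)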
@@ -73,7 +77,7 @@ static inline void rt_clear_overload(struct rq *rq)
 
 static void update_rt_migration(struct rt_rq *rt_rq)
 {
-	if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
+	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
 		if (!rt_rq->overloaded) {
 			rt_set_overload(rq_of_rt_rq(rt_rq));
 			rt_rq->overloaded = 1;
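To see why the old test on rt_rq->rt_nr_running misfires with groups: two RT tasks placed in two different groups each leave their own group's rt_rq looking like it runs a single task, so no per-group check ever declares overload even though the CPU clearly is overloaded. Counting at the root catches it. A toy userspace demonstration of just that arithmetic, with made-up numbers:

#include <stdio.h>

struct counts { unsigned long migratory, total; };

static int overloaded(struct counts c)
{
	return c.migratory && c.total > 1;
}

int main(void)
{
	/* one migratable RT task in each of two groups, same CPU */
	struct counts group_a = { 1, 1 };
	struct counts group_b = { 1, 1 };
	struct counts root    = { 2, 2 };	/* the same two tasks, seen at the root */

	printf("group_a: %d, group_b: %d\n",
	       overloaded(group_a), overloaded(group_b));	/* 0, 0 */
	printf("root: %d\n", overloaded(root));			/* 1 */
	return 0;
}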
@@ -86,6 +90,12 @@ static void update_rt_migration(struct rt_rq *rt_rq)
 
 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+	if (!rt_entity_is_task(rt_se))
+		return;
+
+	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
+
+	rt_rq->rt_nr_total++;
 	if (rt_se->nr_cpus_allowed > 1)
 		rt_rq->rt_nr_migratory++;
 
@@ -94,6 +104,12 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+	if (!rt_entity_is_task(rt_se))
+		return;
+
+	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
+
+	rt_rq->rt_nr_total--;
 	if (rt_se->nr_cpus_allowed > 1)
 		rt_rq->rt_nr_migratory--;
 
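For context, the assertion this commit protects sits in set_cpus_allowed_rt(), which adjusts rq->rt.rt_nr_migratory on the root runqueue when a task's affinity mask changes width. The sketch below is a condensed paraphrase from memory of the 2.6.31-era logic, not the verbatim function, but it shows why the increment side must use that same root rt_rq:

/* Condensed, approximate sketch -- locking and requeue details elided. */
static void set_cpus_allowed_rt_sketch(struct task_struct *p,
				       const struct cpumask *new_mask)
{
	int weight = cpumask_weight(new_mask);
	struct rq *rq = task_rq(p);

	if (p->rt.nr_cpus_allowed <= 1 && weight > 1) {
		rq->rt.rt_nr_migratory++;
	} else if (p->rt.nr_cpus_allowed > 1 && weight <= 1) {
		/* This fired before the fix: the matching increment had
		 * happened on a group rt_rq, not on this root one. */
		BUG_ON(!rq->rt.rt_nr_migratory);
		rq->rt.rt_nr_migratory--;
	}

	update_rt_migration(&rq->rt);
}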