Merge branch 'sched/latest' of git://git.kernel.org/pub/scm/linux/kernel/git/ghaskins/linux-2.6-hacks into sched/rt
commit 0a6d4e1dc9
@@ -142,6 +142,7 @@ extern struct cred init_cred;
.nr_cpus_allowed = NR_CPUS, \
}, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \
.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \
.ptraced = LIST_HEAD_INIT(tsk.ptraced), \
.ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
.real_parent = &tsk, \
@@ -96,6 +96,10 @@ struct plist_node {
# define PLIST_HEAD_LOCK_INIT(_lock)
#endif

#define _PLIST_HEAD_INIT(head) \
.prio_list = LIST_HEAD_INIT((head).prio_list), \
.node_list = LIST_HEAD_INIT((head).node_list)

/**
* PLIST_HEAD_INIT - static struct plist_head initializer
* @head: struct plist_head variable name

@@ -103,8 +107,7 @@ struct plist_node {
*/
#define PLIST_HEAD_INIT(head, _lock) \
{ \
.prio_list = LIST_HEAD_INIT((head).prio_list), \
.node_list = LIST_HEAD_INIT((head).node_list), \
_PLIST_HEAD_INIT(head), \
PLIST_HEAD_LOCK_INIT(&(_lock)) \
}

@@ -116,7 +119,7 @@ struct plist_node {
#define PLIST_NODE_INIT(node, __prio) \
{ \
.prio = (__prio), \
.plist = PLIST_HEAD_INIT((node).plist, NULL), \
.plist = { _PLIST_HEAD_INIT((node).plist) }, \
}

/**
@@ -977,6 +977,7 @@ struct sched_class {
struct rq *busiest, struct sched_domain *sd,
enum cpu_idle_type idle);
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
int (*needs_post_schedule) (struct rq *this_rq);
void (*post_schedule) (struct rq *this_rq);
void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);

@@ -1143,6 +1144,7 @@ struct task_struct {
#endif

struct list_head tasks;
struct plist_node pushable_tasks;

struct mm_struct *mm, *active_mm;
@@ -464,11 +464,15 @@ struct rt_rq {
struct rt_prio_array active;
unsigned long rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
int highest_prio; /* highest queued rt task prio */
struct {
int curr; /* highest queued rt task prio */
int next; /* next highest */
} highest_prio;
#endif
#ifdef CONFIG_SMP
unsigned long rt_nr_migratory;
int overloaded;
struct plist_head pushable_tasks;
#endif
int rt_throttled;
u64 rt_time;

@@ -1607,21 +1611,42 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
#endif

#ifdef CONFIG_PREEMPT

/*
* double_lock_balance - lock the busiest runqueue, this_rq is locked already.
* fair double_lock_balance: Safely acquires both rq->locks in a fair
* way at the expense of forcing extra atomic operations in all
* invocations. This assures that the double_lock is acquired using the
* same underlying policy as the spinlock_t on this architecture, which
* reduces latency compared to the unfair variant below. However, it
* also adds more overhead and therefore may reduce throughput.
*/
static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
__releases(this_rq->lock)
__acquires(busiest->lock)
__acquires(this_rq->lock)
{
spin_unlock(&this_rq->lock);
double_rq_lock(this_rq, busiest);

return 1;
}

#else
/*
* Unfair double_lock_balance: Optimizes throughput at the expense of
* latency by eliminating extra atomic operations when the locks are
* already in proper order on entry. This favors lower cpu-ids and will
* grant the double lock to lower cpus over higher ids under contention,
* regardless of entry order into the function.
*/
static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
__releases(this_rq->lock)
__acquires(busiest->lock)
__acquires(this_rq->lock)
{
int ret = 0;

if (unlikely(!irqs_disabled())) {
/* printk() doesn't work good under rq->lock */
spin_unlock(&this_rq->lock);
BUG_ON(1);
}
if (unlikely(!spin_trylock(&busiest->lock))) {
if (busiest < this_rq) {
spin_unlock(&this_rq->lock);

@@ -1634,6 +1659,22 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
return ret;
}

#endif /* CONFIG_PREEMPT */

/*
* double_lock_balance - lock the busiest runqueue, this_rq is locked already.
*/
static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
if (unlikely(!irqs_disabled())) {
/* printk() doesn't work good under rq->lock */
spin_unlock(&this_rq->lock);
BUG_ON(1);
}

return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
__releases(busiest->lock)
{
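For context on the two variants above: the unfair path avoids deadlock by always taking the two runqueue locks in a fixed global order (ascending address), which is what the "busiest < this_rq" comparison implements. A minimal userspace sketch of that rule, with pthread mutexes standing in for rq->lock and a hypothetical lock_two_rqs() helper (not kernel code):

#include <pthread.h>

struct rq_stub {			/* illustrative stand-in for struct rq */
	pthread_mutex_t lock;
};

/* Take the lower-address lock first so every caller uses the same global
 * ordering; assuming a != b, no two callers can each hold one lock while
 * waiting for the other. */
static void lock_two_rqs(struct rq_stub *a, struct rq_stub *b)
{
	if (a < b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}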
@@ -2445,6 +2486,8 @@ void sched_fork(struct task_struct *p, int clone_flags)
/* Want to start with kernel preemption disabled. */
task_thread_info(p)->preempt_count = 1;
#endif
plist_node_init(&p->pushable_tasks, MAX_PRIO);

put_cpu();
}
@@ -2585,6 +2628,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
{
struct mm_struct *mm = rq->prev_mm;
long prev_state;
#ifdef CONFIG_SMP
int post_schedule = 0;

if (current->sched_class->needs_post_schedule)
post_schedule = current->sched_class->needs_post_schedule(rq);
#endif

rq->prev_mm = NULL;

@@ -2603,7 +2652,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
finish_arch_switch(prev);
finish_lock_switch(rq, prev);
#ifdef CONFIG_SMP
if (current->sched_class->post_schedule)
if (post_schedule)
current->sched_class->post_schedule(rq);
#endif
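In outline, the hunk above caches the class's needs_post_schedule() answer while rq->lock is still held, and only calls post_schedule() after the lock has been dropped when that answer was true. A small illustrative sketch of the ordering (rq_stub and the helper names are hypothetical, not the kernel API):

#include <stdbool.h>

struct rq_stub {
	bool has_pushable;		/* pushable RT tasks queued? */
};

/* Mirrors needs_post_schedule_rt(): evaluated while the lock is held. */
static bool needs_post_schedule(struct rq_stub *rq)
{
	return rq->has_pushable;
}

/* Mirrors post_schedule_rt(): re-takes the lock and pushes tasks away. */
static void post_schedule(struct rq_stub *rq)
{
	rq->has_pushable = false;	/* stand-in for the push pass */
}

static void finish_switch(struct rq_stub *rq)
{
	bool post = needs_post_schedule(rq);	/* decided under the lock */

	/* ... the lock is dropped here by finish_lock_switch() ... */
	if (post)
		post_schedule(rq);		/* only when work is queued */
}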
@@ -2984,6 +3033,16 @@ next:
pulled++;
rem_load_move -= p->se.load.weight;

#ifdef CONFIG_PREEMPT
/*
* NEWIDLE balancing is a source of latency, so preemptible kernels
* will stop after the first task is pulled to minimize the critical
* section.
*/
if (idle == CPU_NEWLY_IDLE)
goto out;
#endif

/*
* We only want to steal up to the prescribed amount of weighted load.
*/

@@ -3030,9 +3089,15 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
sd, idle, all_pinned, &this_best_prio);
class = class->next;

#ifdef CONFIG_PREEMPT
/*
* NEWIDLE balancing is a source of latency, so preemptible
* kernels will stop after the first task is pulled to minimize
* the critical section.
*/
if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
break;
#endif
} while (class && max_load_move > total_load_moved);

return total_load_moved > 0;

@@ -8201,11 +8266,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
rt_rq->highest_prio = MAX_RT_PRIO;
rt_rq->highest_prio.curr = MAX_RT_PRIO;
rt_rq->highest_prio.next = MAX_RT_PRIO;
#endif
#ifdef CONFIG_SMP
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
plist_head_init(&rq->rt.pushable_tasks, &rq->lock);
#endif

rt_rq->rt_time = 0;
@@ -49,6 +49,24 @@ static void update_rt_migration(struct rq *rq)
rq->rt.overloaded = 0;
}
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
plist_node_init(&p->pushable_tasks, p->prio);
plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
}

#else

#define enqueue_pushable_task(rq, p) do { } while (0)
#define dequeue_pushable_task(rq, p) do { } while (0)

#endif /* CONFIG_SMP */

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
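The helpers above keep a per-runqueue plist of pushable tasks sorted by priority, so the push path can always take the best candidate from the head. A rough userspace analogue of that ordering (struct ptask, pushable_insert() and pushable_first() are illustrative names, not the kernel plist API):

#include <stddef.h>

struct ptask {				/* illustrative stand-in for a task */
	int prio;			/* lower value = higher priority */
	struct ptask *next;
};

/* Insert in ascending prio order, keeping FIFO order among equal
 * priorities, similar in spirit to plist_add(). */
static void pushable_insert(struct ptask **head, struct ptask *p)
{
	while (*head && (*head)->prio <= p->prio)
		head = &(*head)->next;
	p->next = *head;
	*head = p;
}

/* The best pushable candidate is then simply the list head, which is
 * what pick_next_pushable_task() exploits further below. */
static struct ptask *pushable_first(struct ptask *head)
{
	return head;			/* NULL when nothing is pushable */
}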
@@ -108,7 +126,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
if (rt_rq->rt_nr_running) {
if (rt_se && !on_rt_rq(rt_se))
enqueue_rt_entity(rt_se);
if (rt_rq->highest_prio < curr->prio)
if (rt_rq->highest_prio.curr < curr->prio)
resched_task(curr);
}
}

@@ -473,7 +491,7 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
struct rt_rq *rt_rq = group_rt_rq(rt_se);

if (rt_rq)
return rt_rq->highest_prio;
return rt_rq->highest_prio.curr;
#endif

return rt_task_of(rt_se)->prio;

@@ -547,33 +565,64 @@ static void update_curr_rt(struct rq *rq)
}
}

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED

static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);

static inline int next_prio(struct rq *rq)
{
struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);

if (next && rt_prio(next->prio))
return next->prio;
else
return MAX_RT_PRIO;
}
#endif

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
WARN_ON(!rt_prio(rt_se_prio(rt_se)));
int prio = rt_se_prio(rt_se);
#ifdef CONFIG_SMP
struct rq *rq = rq_of_rt_rq(rt_rq);
#endif

WARN_ON(!rt_prio(prio));
rt_rq->rt_nr_running++;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
#ifdef CONFIG_SMP
struct rq *rq = rq_of_rt_rq(rt_rq);
#endif
if (prio < rt_rq->highest_prio.curr) {

rt_rq->highest_prio = rt_se_prio(rt_se);
/*
* If the new task is higher in priority than anything on the
* run-queue, we have a new high that must be published to
* the world. We also know that the previous high becomes
* our next-highest.
*/
rt_rq->highest_prio.next = rt_rq->highest_prio.curr;
rt_rq->highest_prio.curr = prio;
#ifdef CONFIG_SMP
if (rq->online)
cpupri_set(&rq->rd->cpupri, rq->cpu,
rt_se_prio(rt_se));
cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
#endif
}
} else if (prio == rt_rq->highest_prio.curr)
/*
* If the next task is equal in priority to the highest on
* the run-queue, then we implicitly know that the next highest
* task cannot be any lower than current
*/
rt_rq->highest_prio.next = prio;
else if (prio < rt_rq->highest_prio.next)
/*
* Otherwise, we need to recompute next-highest
*/
rt_rq->highest_prio.next = next_prio(rq);
#endif
#ifdef CONFIG_SMP
if (rt_se->nr_cpus_allowed > 1) {
struct rq *rq = rq_of_rt_rq(rt_rq);

if (rt_se->nr_cpus_allowed > 1)
rq->rt.rt_nr_migratory++;
}

update_rt_migration(rq_of_rt_rq(rt_rq));
update_rt_migration(rq);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
if (rt_se_boosted(rt_se))
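The curr/next bookkeeping added to inc_rt_tasks() above can be summarized by a small sketch (struct prio_cache and prio_cache_enqueue() are illustrative names; the kernel recomputes the next-highest by scanning via next_prio() in the last case rather than assigning directly):

struct prio_cache {
	int curr;		/* highest queued prio (lower value = higher) */
	int next;		/* second-highest queued prio */
};

static void prio_cache_enqueue(struct prio_cache *c, int prio)
{
	if (prio < c->curr) {
		/* new best: the old best becomes next-highest */
		c->next = c->curr;
		c->curr = prio;
	} else if (prio == c->curr) {
		/* a tie with the best: next can be no worse than prio */
		c->next = prio;
	} else if (prio < c->next) {
		/* between curr and next: next tightens to prio */
		c->next = prio;
	}
}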
@@ -590,7 +639,8 @@ static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
#ifdef CONFIG_SMP
int highest_prio = rt_rq->highest_prio;
struct rq *rq = rq_of_rt_rq(rt_rq);
int highest_prio = rt_rq->highest_prio.curr;
#endif

WARN_ON(!rt_prio(rt_se_prio(rt_se)));

@@ -598,33 +648,34 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
rt_rq->rt_nr_running--;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
if (rt_rq->rt_nr_running) {
struct rt_prio_array *array;
int prio = rt_se_prio(rt_se);

WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
/* recalculate */
array = &rt_rq->active;
rt_rq->highest_prio =
WARN_ON(prio < rt_rq->highest_prio.curr);

/*
* This may have been our highest or next-highest priority
* task and therefore we may have some recomputation to do
*/
if (prio == rt_rq->highest_prio.curr) {
struct rt_prio_array *array = &rt_rq->active;

rt_rq->highest_prio.curr =
sched_find_first_bit(array->bitmap);
} /* otherwise leave rq->highest prio alone */
}

if (prio <= rt_rq->highest_prio.next)
rt_rq->highest_prio.next = next_prio(rq);
} else
rt_rq->highest_prio = MAX_RT_PRIO;
rt_rq->highest_prio.curr = MAX_RT_PRIO;
#endif
#ifdef CONFIG_SMP
if (rt_se->nr_cpus_allowed > 1) {
struct rq *rq = rq_of_rt_rq(rt_rq);
if (rt_se->nr_cpus_allowed > 1)
rq->rt.rt_nr_migratory--;
}

if (rt_rq->highest_prio != highest_prio) {
struct rq *rq = rq_of_rt_rq(rt_rq);
if (rq->online && rt_rq->highest_prio.curr != highest_prio)
cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);

if (rq->online)
cpupri_set(&rq->rd->cpupri, rq->cpu,
rt_rq->highest_prio);
}

update_rt_migration(rq_of_rt_rq(rt_rq));
update_rt_migration(rq);
#endif /* CONFIG_SMP */
#ifdef CONFIG_RT_GROUP_SCHED
if (rt_se_boosted(rt_se))
@@ -718,6 +769,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
enqueue_rt_entity(rt_se);

if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);

inc_cpu_load(rq, p->se.load.weight);
}

@@ -728,6 +782,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
update_curr_rt(rq);
dequeue_rt_entity(rt_se);

dequeue_pushable_task(rq, p);

dec_cpu_load(rq, p->se.load.weight);
}

@@ -878,7 +934,7 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
return next;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
struct sched_rt_entity *rt_se;
struct task_struct *p;

@@ -900,6 +956,18 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
p = rt_task_of(rt_se);
p->se.exec_start = rq->clock;

return p;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
struct task_struct *p = _pick_next_task_rt(rq);

/* The running task is never eligible for pushing */
if (p)
dequeue_pushable_task(rq, p);

return p;
}

@@ -907,6 +975,13 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
update_curr_rt(rq);
p->se.exec_start = 0;

/*
* The previous task needs to be made eligible for pushing
* if it is still active
*/
if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP
@@ -1072,7 +1147,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
}

/* If this rq is still suitable use it. */
if (lowest_rq->rt.highest_prio > task->prio)
if (lowest_rq->rt.highest_prio.curr > task->prio)
break;

/* try again */

@@ -1083,6 +1158,31 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
return lowest_rq;
}

static inline int has_pushable_tasks(struct rq *rq)
{
return !plist_head_empty(&rq->rt.pushable_tasks);
}

static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
struct task_struct *p;

if (!has_pushable_tasks(rq))
return NULL;

p = plist_first_entry(&rq->rt.pushable_tasks,
struct task_struct, pushable_tasks);

BUG_ON(rq->cpu != task_cpu(p));
BUG_ON(task_current(rq, p));
BUG_ON(p->rt.nr_cpus_allowed <= 1);

BUG_ON(!p->se.on_rq);
BUG_ON(!rt_task(p));

return p;
}

/*
* If the current CPU has more than one RT task, see if the non
* running task can migrate over to a CPU that is running a task
@@ -1092,13 +1192,11 @@ static int push_rt_task(struct rq *rq)
{
struct task_struct *next_task;
struct rq *lowest_rq;
int ret = 0;
int paranoid = RT_MAX_TRIES;

if (!rq->rt.overloaded)
return 0;

next_task = pick_next_highest_task_rt(rq, -1);
next_task = pick_next_pushable_task(rq);
if (!next_task)
return 0;

@@ -1127,16 +1225,34 @@ static int push_rt_task(struct rq *rq)
struct task_struct *task;
/*
* find lock_lowest_rq releases rq->lock
* so it is possible that next_task has changed.
* If it has, then try again.
* so it is possible that next_task has migrated.
*
* We need to make sure that the task is still on the same
* run-queue and is also still the next task eligible for
* pushing.
*/
task = pick_next_highest_task_rt(rq, -1);
if (unlikely(task != next_task) && task && paranoid--) {
put_task_struct(next_task);
next_task = task;
goto retry;
task = pick_next_pushable_task(rq);
if (task_cpu(next_task) == rq->cpu && task == next_task) {
/*
* If we get here, the task hasnt moved at all, but
* it has failed to push. We will not try again,
* since the other cpus will pull from us when they
* are ready.
*/
dequeue_pushable_task(rq, next_task);
goto out;
}
goto out;

if (!task)
/* No more tasks, just exit */
goto out;

/*
* Something has shifted, try again.
*/
put_task_struct(next_task);
next_task = task;
goto retry;
}

deactivate_task(rq, next_task, 0);

@@ -1147,23 +1263,12 @@ static int push_rt_task(struct rq *rq)
double_unlock_balance(rq, lowest_rq);

ret = 1;
out:
put_task_struct(next_task);

return ret;
return 1;
}

/*
* TODO: Currently we just use the second highest prio task on
* the queue, and stop when it can't migrate (or there's
* no more RT tasks). There may be a case where a lower
* priority RT task has a different affinity than the
* higher RT task. In this case the lower RT task could
* possibly be able to migrate where as the higher priority
* RT task could not. We currently ignore this issue.
* Enhancements are welcome!
*/
static void push_rt_tasks(struct rq *rq)
{
/* push_rt_task will return true if it moved an RT */
@@ -1174,33 +1279,35 @@ static void push_rt_tasks(struct rq *rq)
static int pull_rt_task(struct rq *this_rq)
{
int this_cpu = this_rq->cpu, ret = 0, cpu;
struct task_struct *p, *next;
struct task_struct *p;
struct rq *src_rq;

if (likely(!rt_overloaded(this_rq)))
return 0;

next = pick_next_task_rt(this_rq);

for_each_cpu(cpu, this_rq->rd->rto_mask) {
if (this_cpu == cpu)
continue;

src_rq = cpu_rq(cpu);

/*
* Don't bother taking the src_rq->lock if the next highest
* task is known to be lower-priority than our current task.
* This may look racy, but if this value is about to go
* logically higher, the src_rq will push this task away.
* And if its going logically lower, we do not care
*/
if (src_rq->rt.highest_prio.next >=
this_rq->rt.highest_prio.curr)
continue;

/*
* We can potentially drop this_rq's lock in
* double_lock_balance, and another CPU could
* steal our next task - hence we must cause
* the caller to recalculate the next task
* in that case:
* alter this_rq
*/
if (double_lock_balance(this_rq, src_rq)) {
struct task_struct *old_next = next;

next = pick_next_task_rt(this_rq);
if (next != old_next)
ret = 1;
}
double_lock_balance(this_rq, src_rq);

/*
* Are there still pullable RT tasks?

@@ -1214,7 +1321,7 @@ static int pull_rt_task(struct rq *this_rq)
* Do we have an RT task that preempts
* the to-be-scheduled task?
*/
if (p && (!next || (p->prio < next->prio))) {
if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
WARN_ON(p == src_rq->curr);
WARN_ON(!p->se.on_rq);

@@ -1224,12 +1331,9 @@ static int pull_rt_task(struct rq *this_rq)
* This is just that p is wakeing up and hasn't
* had a chance to schedule. We only pull
* p if it is lower in priority than the
* current task on the run queue or
* this_rq next task is lower in prio than
* the current task on that rq.
* current task on the run queue
*/
if (p->prio < src_rq->curr->prio ||
(next && next->prio < src_rq->curr->prio))
if (p->prio < src_rq->curr->prio)
goto skip;

ret = 1;

@@ -1242,13 +1346,7 @@ static int pull_rt_task(struct rq *this_rq)
* case there's an even higher prio task
* in another runqueue. (low likelyhood
* but possible)
*
* Update next so that we won't pick a task
* on another cpu with a priority lower (or equal)
* than the one we just picked.
*/
next = p;

}
skip:
double_unlock_balance(this_rq, src_rq);
@@ -1260,24 +1358,27 @@ static int pull_rt_task(struct rq *this_rq)
static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
/* Try to pull RT tasks here if we lower this rq's prio */
if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
pull_rt_task(rq);
}

/*
* assumes rq->lock is held
*/
static int needs_post_schedule_rt(struct rq *rq)
{
return has_pushable_tasks(rq);
}

static void post_schedule_rt(struct rq *rq)
{
/*
* If we have more than one rt_task queued, then
* see if we can push the other rt_tasks off to other CPUS.
* Note we may release the rq lock, and since
* the lock was owned by prev, we need to release it
* first via finish_lock_switch and then reaquire it here.
* This is only called if needs_post_schedule_rt() indicates that
* we need to push tasks away
*/
if (unlikely(rq->rt.overloaded)) {
spin_lock_irq(&rq->lock);
push_rt_tasks(rq);
spin_unlock_irq(&rq->lock);
}
spin_lock_irq(&rq->lock);
push_rt_tasks(rq);
spin_unlock_irq(&rq->lock);
}

/*

@@ -1288,7 +1389,8 @@ static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{
if (!task_running(rq, p) &&
!test_tsk_need_resched(rq->curr) &&
rq->rt.overloaded)
has_pushable_tasks(rq) &&
p->rt.nr_cpus_allowed > 1)
push_rt_tasks(rq);
}
@@ -1324,6 +1426,24 @@ static void set_cpus_allowed_rt(struct task_struct *p,
if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
struct rq *rq = task_rq(p);

if (!task_current(rq, p)) {
/*
* Make sure we dequeue this task from the pushable list
* before going further. It will either remain off of
* the list because we are no longer pushable, or it
* will be requeued.
*/
if (p->rt.nr_cpus_allowed > 1)
dequeue_pushable_task(rq, p);

/*
* Requeue if our weight is changing and still > 1
*/
if (weight > 1)
enqueue_pushable_task(rq, p);

}

if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
rq->rt.rt_nr_migratory++;
} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {

@@ -1346,7 +1466,7 @@ static void rq_online_rt(struct rq *rq)
__enable_runtime(rq);

cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */

@@ -1438,7 +1558,7 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
* can release the rq lock and p could migrate.
* Only reschedule if p is still on the same runqueue.
*/
if (p->prio > rq->rt.highest_prio && rq->curr == p)
if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
resched_task(p);
#else
/* For UP simply resched on drop of prio */

@@ -1509,6 +1629,9 @@ static void set_curr_task_rt(struct rq *rq)
struct task_struct *p = rq->curr;

p->se.exec_start = rq->clock;

/* The running task is never eligible for pushing */
dequeue_pushable_task(rq, p);
}

static const struct sched_class rt_sched_class = {

@@ -1531,6 +1654,7 @@ static const struct sched_class rt_sched_class = {
.rq_online = rq_online_rt,
.rq_offline = rq_offline_rt,
.pre_schedule = pre_schedule_rt,
.needs_post_schedule = needs_post_schedule_rt,
.post_schedule = post_schedule_rt,
.task_wake_up = task_wake_up_rt,
.switched_from = switched_from_rt,