Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched, doc: Update sched-design-CFS.txt
  sched: Remove unused 'rq' variable and cpu_rq() call from alloc_fair_sched_group()
  sched.h: Fix a typo ("its")
  sched: Fix yield_to kernel-doc
This commit is contained in:
Linus Torvalds 2011-03-25 17:59:38 -07:00
commit 8dd90265ac
5 changed files with 4 additions and 14 deletions

View File

@ -164,7 +164,7 @@ This is the (partial) list of the hooks:
It puts the scheduling entity (task) into the red-black tree and
increments the nr_running variable.
- dequeue_tree(...) - dequeue_task(...)
When a task is no longer runnable, this function is called to keep the
corresponding scheduling entity out of the red-black tree. It decrements
@ -195,11 +195,6 @@ This is the (partial) list of the hooks:
This function is mostly called from time tick functions; it might lead to
process switch. This drives the running preemption.
- task_new(...)
The core scheduler gives the scheduling module an opportunity to manage new
task startup. The CFS scheduling module uses it for group scheduling, while
the scheduling module for a real-time task does not use it.

View File

@ -517,7 +517,7 @@ struct thread_group_cputimer {
struct autogroup;
/*
* NOTE! "signal_struct" does not have it's own * NOTE! "signal_struct" does not have its own
* locking, because a shared signal_struct always
* implies a shared sighand_struct, so locking
* sighand_struct is always a proper superset of

View File

@ -5473,6 +5473,8 @@ EXPORT_SYMBOL(yield);
* yield_to - yield the current processor to another thread in
* your thread group, or accelerate that thread toward the
* processor it's on.
* @p: target task
* @preempt: whether task preemption is allowed or not
*
* It's the caller's job to ensure that the target task struct
* can't go away on us before we can do any checks.
@ -8449,7 +8451,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se;
struct rq *rq;
int i;
tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@ -8462,8 +8463,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
tg->shares = NICE_0_LOAD;
for_each_possible_cpu(i) {
rq = cpu_rq(i);
cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
GFP_KERNEL, cpu_to_node(i));
if (!cfs_rq)

View File

@ -94,6 +94,4 @@ static const struct sched_class idle_sched_class = {
.prio_changed = prio_changed_idle,
.switched_to = switched_to_idle,
/* no .task_new for idle tasks */
};

View File

@ -102,6 +102,4 @@ static const struct sched_class stop_sched_class = {
.prio_changed = prio_changed_stop,
.switched_to = switched_to_stop,
/* no .task_new for stop tasks */
};