sched: Fix kthread_bind() by moving the body of kthread_bind() to sched.c
Eric Paris reported that commit f685ceacab causes boot-time PREEMPT_DEBUG complaints:
[ 4.590699] BUG: using smp_processor_id() in preemptible [00000000] code: rmmod/1314
[ 4.593043] caller is task_hot+0x86/0xd0
Since kthread_bind() messes with scheduler internals, move the
body to sched.c, and lock the runqueue.
Reported-by: Eric Paris <eparis@redhat.com>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Tested-by: Eric Paris <eparis@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1256813310.7574.3.camel@marge.simson.net>
[ v2: fix !SMP build and clean up ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent: 6b9de613ae
commit: b84ff7d6f1
@ -149,29 +149,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(kthread_create);
|
EXPORT_SYMBOL(kthread_create);
|
||||||
|
|
||||||
/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @k: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 *
 * NOTE(review): this is the version being REMOVED from kthread.c by this
 * commit. It calls set_task_cpu() without holding the runqueue lock, which
 * per the commit message trips a PREEMPT_DEBUG warning ("using
 * smp_processor_id() in preemptible code", caller task_hot()). The
 * replacement in sched.c takes rq->lock around the same operations.
 */
void kthread_bind(struct task_struct *k, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(k, TASK_UNINTERRUPTIBLE)) {
		/* Thread never parked in kthread(); binding now would race. */
		WARN_ON(1);
		return;
	}
	/* Migrate the (inactive) thread and pin it to @cpu. */
	set_task_cpu(k, cpu);
	k->cpus_allowed = cpumask_of_cpu(cpu);
	k->rt.nr_cpus_allowed = 1;
	/* Mark the thread as cpu-bound so userspace can't change affinity. */
	k->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* kthread_stop - stop a thread created by kthread_create().
|
* kthread_stop - stop a thread created by kthread_create().
|
||||||
* @k: thread created by kthread_create().
|
* @k: thread created by kthread_create().
|
||||||
|
|
|
@ -1996,6 +1996,38 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
|
||||||
p->sched_class->prio_changed(rq, p, oldprio, running);
|
p->sched_class->prio_changed(rq, p, oldprio, running);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 *
 * Function lives here instead of kthread.c because it messes with
 * scheduler internals which require locking.
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		/* Thread is not parked in kthread(); refuse to bind. */
		WARN_ON(1);
		return;
	}

	/*
	 * Take the target runqueue lock with irqs off while touching
	 * scheduler state; this keeps us non-preemptible across
	 * set_task_cpu(), avoiding the boot-time PREEMPT_DEBUG warning
	 * this commit fixes.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	set_task_cpu(p, cpu);
	p->cpus_allowed = cpumask_of_cpu(cpu);
	p->rt.nr_cpus_allowed = 1;
	/* Flag the thread as cpu-bound so its affinity can't be changed. */
	p->flags |= PF_THREAD_BOUND;
	spin_unlock_irqrestore(&rq->lock, flags);
}
EXPORT_SYMBOL(kthread_bind);
|
||||||
|
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
/*
|
/*
|
||||||
* Is this task likely cache-hot:
|
* Is this task likely cache-hot:
|
||||||
|
|
Loading…
Reference in New Issue