Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  cpuset: fix regression when failed to generate sched domains
  sched, signals: fix the racy usage of ->signal in account_group_xxx/run_posix_cpu_timers
  sched: fix kernel warning on /proc/sched_debug access
  sched: correct sched-rt-group.txt pathname in init/Kconfig
This commit is contained in:
commit
8c60bfb066
|
@ -354,7 +354,7 @@ config RT_GROUP_SCHED
|
|||
setting below. If enabled, it will also make it impossible to
|
||||
schedule realtime tasks for non-root users until you allocate
|
||||
realtime bandwidth for them.
|
||||
See Documentation/sched-rt-group.txt for more information.
|
||||
See Documentation/scheduler/sched-rt-group.txt for more information.
|
||||
|
||||
choice
|
||||
depends on GROUP_SCHED
|
||||
|
|
|
@ -587,7 +587,6 @@ static int generate_sched_domains(cpumask_t **domains,
|
|||
int ndoms; /* number of sched domains in result */
|
||||
int nslot; /* next empty doms[] cpumask_t slot */
|
||||
|
||||
ndoms = 0;
|
||||
doms = NULL;
|
||||
dattr = NULL;
|
||||
csa = NULL;
|
||||
|
@ -674,10 +673,8 @@ restart:
|
|||
* Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
|
||||
*/
|
||||
doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
|
||||
if (!doms) {
|
||||
ndoms = 0;
|
||||
if (!doms)
|
||||
goto done;
|
||||
}
|
||||
|
||||
/*
|
||||
* The rest of the code, including the scheduler, can deal with
|
||||
|
@ -732,6 +729,13 @@ restart:
|
|||
done:
|
||||
kfree(csa);
|
||||
|
||||
/*
|
||||
* Fallback to the default domain if kmalloc() failed.
|
||||
* See comments in partition_sched_domains().
|
||||
*/
|
||||
if (doms == NULL)
|
||||
ndoms = 1;
|
||||
|
||||
*domains = doms;
|
||||
*attributes = dattr;
|
||||
return ndoms;
|
||||
|
|
|
@ -1308,9 +1308,10 @@ static inline int task_cputime_expired(const struct task_cputime *sample,
|
|||
*/
|
||||
static inline int fastpath_timer_check(struct task_struct *tsk)
|
||||
{
|
||||
struct signal_struct *sig = tsk->signal;
|
||||
struct signal_struct *sig;
|
||||
|
||||
if (unlikely(!sig))
|
||||
/* tsk == current, ensure it is safe to use ->signal/sighand */
|
||||
if (unlikely(tsk->exit_state))
|
||||
return 0;
|
||||
|
||||
if (!task_cputime_zero(&tsk->cputime_expires)) {
|
||||
|
@ -1323,6 +1324,8 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
|
|||
if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
|
||||
return 1;
|
||||
}
|
||||
|
||||
sig = tsk->signal;
|
||||
if (!task_cputime_zero(&sig->cputime_expires)) {
|
||||
struct task_cputime group_sample;
|
||||
|
||||
|
|
|
@ -7789,13 +7789,14 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
|
|||
*
|
||||
* The passed in 'doms_new' should be kmalloc'd. This routine takes
|
||||
* ownership of it and will kfree it when done with it. If the caller
|
||||
* failed the kmalloc call, then it can pass in doms_new == NULL,
|
||||
* and partition_sched_domains() will fallback to the single partition
|
||||
* 'fallback_doms', it also forces the domains to be rebuilt.
|
||||
* failed the kmalloc call, then it can pass in doms_new == NULL &&
|
||||
* ndoms_new == 1, and partition_sched_domains() will fallback to
|
||||
* the single partition 'fallback_doms', it also forces the domains
|
||||
* to be rebuilt.
|
||||
*
|
||||
* If doms_new==NULL it will be replaced with cpu_online_map.
|
||||
* ndoms_new==0 is a special case for destroying existing domains.
|
||||
* It will not create the default domain.
|
||||
* If doms_new == NULL it will be replaced with cpu_online_map.
|
||||
* ndoms_new == 0 is a special case for destroying existing domains,
|
||||
* and it will not create the default domain.
|
||||
*
|
||||
* Call with hotplug lock held
|
||||
*/
|
||||
|
|
|
@ -423,10 +423,11 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
|
|||
#undef __P
|
||||
|
||||
{
|
||||
unsigned int this_cpu = raw_smp_processor_id();
|
||||
u64 t0, t1;
|
||||
|
||||
t0 = sched_clock();
|
||||
t1 = sched_clock();
|
||||
t0 = cpu_clock(this_cpu);
|
||||
t1 = cpu_clock(this_cpu);
|
||||
SEQ_printf(m, "%-35s:%21Ld\n",
|
||||
"clock-delta", (long long)(t1-t0));
|
||||
}
|
||||
|
|
|
@ -298,9 +298,11 @@ static inline void account_group_user_time(struct task_struct *tsk,
|
|||
{
|
||||
struct signal_struct *sig;
|
||||
|
||||
sig = tsk->signal;
|
||||
if (unlikely(!sig))
|
||||
/* tsk == current, ensure it is safe to use ->signal */
|
||||
if (unlikely(tsk->exit_state))
|
||||
return;
|
||||
|
||||
sig = tsk->signal;
|
||||
if (sig->cputime.totals) {
|
||||
struct task_cputime *times;
|
||||
|
||||
|
@ -325,9 +327,11 @@ static inline void account_group_system_time(struct task_struct *tsk,
|
|||
{
|
||||
struct signal_struct *sig;
|
||||
|
||||
sig = tsk->signal;
|
||||
if (unlikely(!sig))
|
||||
/* tsk == current, ensure it is safe to use ->signal */
|
||||
if (unlikely(tsk->exit_state))
|
||||
return;
|
||||
|
||||
sig = tsk->signal;
|
||||
if (sig->cputime.totals) {
|
||||
struct task_cputime *times;
|
||||
|
||||
|
@ -353,8 +357,11 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
|
|||
struct signal_struct *sig;
|
||||
|
||||
sig = tsk->signal;
|
||||
/* see __exit_signal()->task_rq_unlock_wait() */
|
||||
barrier();
|
||||
if (unlikely(!sig))
|
||||
return;
|
||||
|
||||
if (sig->cputime.totals) {
|
||||
struct task_cputime *times;
|
||||
|
||||
|
|
Loading…
Reference in New Issue