Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Misc fixlets"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/cputime: Fix accounting on multi-threaded processes
  sched/debug: Fix sd->*_idx limit range avoiding overflow
  sched_clock: Prevent 64bit inatomicity on 32bit systems
  sched: Convert BUG_ON()s in try_to_wake_up_local() to WARN_ON_ONCE()s
commit af788e35bf
kernel/sched/clock.c:

@@ -176,10 +176,36 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
 	u64 this_clock, remote_clock;
 	u64 *ptr, old_val, val;
 
+#if BITS_PER_LONG != 64
+again:
+	/*
+	 * Careful here: The local and the remote clock values need to
+	 * be read out atomic as we need to compare the values and
+	 * then update either the local or the remote side. So the
+	 * cmpxchg64 below only protects one readout.
+	 *
+	 * We must reread via sched_clock_local() in the retry case on
+	 * 32bit as an NMI could use sched_clock_local() via the
+	 * tracer and hit between the readout of
+	 * the low32bit and the high 32bit portion.
+	 */
+	this_clock = sched_clock_local(my_scd);
+	/*
+	 * We must enforce atomic readout on 32bit, otherwise the
+	 * update on the remote cpu can hit inbetween the readout of
+	 * the low32bit and the high 32bit portion.
+	 */
+	remote_clock = cmpxchg64(&scd->clock, 0, 0);
+#else
+	/*
+	 * On 64bit the read of [my]scd->clock is atomic versus the
+	 * update, so we can avoid the above 32bit dance.
+	 */
 	sched_clock_local(my_scd);
 again:
 	this_clock = my_scd->clock;
 	remote_clock = scd->clock;
+#endif
 
 	/*
 	 * Use the opportunity that we have both locks
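
A note on the 32bit readout trick above: cmpxchg64(ptr, 0, 0) can never
change the stored value (a 0 -> 0 exchange is a no-op, and any other
value fails the compare), but the hardware compare-and-swap reads all
64 bits in one atomic operation and returns what it saw. A minimal
user-space sketch of the same idea -- my own analogue built on the GCC
__sync builtin, not kernel code:

/*
 * Atomically read a 64-bit value on a 32-bit machine by doing a
 * compare-and-swap of (0 -> 0), mirroring cmpxchg64(&scd->clock, 0, 0).
 * A plain 64-bit load on 32bit compiles to two 32-bit loads and can
 * tear against a concurrent update of the other half.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t clock_val;	/* imagine another CPU updating this */

static uint64_t atomic_read64(uint64_t *ptr)
{
	/* On 32bit x86 this compiles to cmpxchg8b; it returns the
	 * current value whether or not the swap takes place. */
	return __sync_val_compare_and_swap(ptr, (uint64_t)0, (uint64_t)0);
}

int main(void)
{
	clock_val = 0x100000000ULL;	/* only the high word is set */
	printf("%llu\n", (unsigned long long)atomic_read64(&clock_val));
	return 0;
}
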
kernel/sched/core.c:

@@ -1498,8 +1498,10 @@ static void try_to_wake_up_local(struct task_struct *p)
 {
 	struct rq *rq = task_rq(p);
 
-	BUG_ON(rq != this_rq());
-	BUG_ON(p == current);
+	if (WARN_ON_ONCE(rq != this_rq()) ||
+	    WARN_ON_ONCE(p == current))
+		return;
+
 	lockdep_assert_held(&rq->lock);
 
 	if (!raw_spin_trylock(&p->pi_lock)) {
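
Why the conversion: BUG_ON() panics the machine on what is a
recoverable inconsistency, whereas WARN_ON_ONCE() emits one stack trace
per call site and returns the condition, letting the function bail out
and keep the box alive. A rough user-space analogue of the
warn-once-and-return pattern -- WARN_ON_ONCE_ISH() is a hypothetical
macro of mine, and it assumes GCC statement expressions:

#include <stdbool.h>
#include <stdio.h>

/*
 * Evaluate cond; on the first true result at this call site print a
 * warning, and always hand the result back so the caller can return
 * early -- the same shape as the fixed try_to_wake_up_local() above.
 */
#define WARN_ON_ONCE_ISH(cond) ({				\
	static bool warned;					\
	bool ret = (cond);					\
	if (ret && !warned) {					\
		warned = true;					\
		fprintf(stderr, "WARNING: %s at %s:%d\n",	\
			#cond, __FILE__, __LINE__);		\
	}							\
	ret;							\
})

static void wake_local(int task_cpu, int this_cpu)
{
	if (WARN_ON_ONCE_ISH(task_cpu != this_cpu))
		return;		/* degrade gracefully instead of dying */
	/* ... the actual wakeup work would go here ... */
}

int main(void)
{
	wake_local(1, 0);	/* warns once */
	wake_local(1, 0);	/* stays silent */
	return 0;
}
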
kernel/sched/core.c:

@@ -4999,7 +5001,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
 }
 
 static int min_load_idx = 0;
-static int max_load_idx = CPU_LOAD_IDX_MAX;
+static int max_load_idx = CPU_LOAD_IDX_MAX-1;
 
 static void
 set_table_entry(struct ctl_table *entry,
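
The off-by-one being fixed: min_load_idx/max_load_idx are handed to the
sysctl range check as inclusive bounds (extra1/extra2 for
proc_dointvec_minmax()), but the tuned sd->*_idx values end up indexing
an array of CPU_LOAD_IDX_MAX entries, so the largest valid value is
CPU_LOAD_IDX_MAX-1. A standalone sketch of the overflow -- the helper
and variable names here are illustrative, not the kernel's:

#include <stdio.h>

#define CPU_LOAD_IDX_MAX 5

static unsigned long cpu_load[CPU_LOAD_IDX_MAX];

/* Inclusive-range validation, as proc_dointvec_minmax() performs. */
static int set_load_idx(int val, int min, int max, int *out)
{
	if (val < min || val > max)
		return -1;
	*out = val;
	return 0;
}

int main(void)
{
	int idx;

	/*
	 * With max == CPU_LOAD_IDX_MAX, val == 5 passes the check even
	 * though cpu_load[5] is one past the end of the array. The
	 * fixed bound CPU_LOAD_IDX_MAX-1 rejects it.
	 */
	if (set_load_idx(5, 0, CPU_LOAD_IDX_MAX - 1, &idx) < 0)
		printf("rejected out-of-range load_idx\n");
	else
		printf("cpu_load[%d] = %lu\n", idx, cpu_load[idx]);
	return 0;
}
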
kernel/sched/cputime.c:

@@ -310,7 +310,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 
 	t = tsk;
 	do {
-		task_cputime(tsk, &utime, &stime);
+		task_cputime(t, &utime, &stime);
 		times->utime += utime;
 		times->stime += stime;
 		times->sum_exec_runtime += task_sched_runtime(t);
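
The accounting bug: the loop walks every thread t in the group, but it
sampled the group leader's times via task_cputime(tsk, ...), so the
leader's utime/stime was added once per thread while every other
thread's time was dropped. A simplified standalone model of the fixed
loop -- the struct and circular list below are stand-ins for the
kernel's while_each_thread() walk:

#include <stdio.h>

struct task {
	unsigned long long utime, stime;
	struct task *next_thread;	/* circular thread-group list */
};

static void group_cputime(struct task *tsk,
			  unsigned long long *utime,
			  unsigned long long *stime)
{
	struct task *t = tsk;

	*utime = *stime = 0;
	do {
		*utime += t->utime;	/* the fix: sample t, not tsk */
		*stime += t->stime;
		t = t->next_thread;
	} while (t != tsk);
}

int main(void)
{
	struct task leader = { 10, 1, NULL }, worker = { 20, 2, NULL };
	unsigned long long u, s;

	leader.next_thread = &worker;
	worker.next_thread = &leader;
	group_cputime(&leader, &u, &s);
	printf("utime=%llu stime=%llu\n", u, s);	/* 30 and 3 */
	return 0;
}
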