sched: Cure nr_iowait_cpu() users
Commit 0224cf4c5e ("sched: Intoduce get_cpu_iowait_time_us()") broke
things by not making sure preemption was disabled by the callers of
nr_iowait_cpu(), which reads the iowait value of the current cpu.

This resulted in a heap of preempt warnings. Cure this by making
nr_iowait_cpu() take a cpu number and fixing up the callers to pass
in the right number.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Maxim Levitsky <maximlevitsky@gmail.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: linux-pm@lists.linux-foundation.org
LKML-Reference: <1277968037.1868.120.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 9715856922
commit 8c215bd389
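nr_iowait_cpu() reads a per-cpu counter, so its result is only meaningful
for a cpu the caller has pinned itself to. As a minimal sketch (hypothetical
caller, not part of this patch), a preemptible user of the new interface
would bracket the read with get_cpu()/put_cpu(), which disable and re-enable
preemption:

	int cpu = get_cpu();	/* disables preemption */
	unsigned long nr = nr_iowait_cpu(cpu);
	put_cpu();		/* re-enables preemption */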
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -143,7 +143,7 @@ static inline int which_bucket(unsigned int duration)
 	 * This allows us to calculate
 	 * E(duration)|iowait
 	 */
-	if (nr_iowait_cpu())
+	if (nr_iowait_cpu(smp_processor_id()))
 		bucket = BUCKETS/2;
 
 	if (duration < 10)
@@ -175,7 +175,7 @@ static inline int performance_multiplier(void)
 	mult += 2 * get_loadavg();
 
 	/* for IO wait tasks (per cpu!) we add 5x each */
-	mult += 10 * nr_iowait_cpu();
+	mult += 10 * nr_iowait_cpu(smp_processor_id());
 
 	return mult;
 }
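Both menu governor callers above run from the cpuidle idle path, where
preemption is already disabled, so passing the bare smp_processor_id()
here should be safe and will not trip the preempt debug check.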
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -139,7 +139,7 @@ extern int nr_processes(void);
 extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_iowait(void);
-extern unsigned long nr_iowait_cpu(void);
+extern unsigned long nr_iowait_cpu(int cpu);
 extern unsigned long this_cpu_load(void);
 
 
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2864,9 +2864,9 @@ unsigned long nr_iowait(void)
 	return sum;
 }
 
-unsigned long nr_iowait_cpu(void)
+unsigned long nr_iowait_cpu(int cpu)
 {
-	struct rq *this = this_rq();
+	struct rq *this = cpu_rq(cpu);
 	return atomic_read(&this->nr_iowait);
 }
 
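With cpu_rq(cpu) in place of this_rq(), the helper no longer depends on
smp_processor_id() and can be asked about any cpu. A hypothetical example
(not in this patch) summing iowait tasks across cpus, mirroring the
pattern nr_iowait() itself uses:

	unsigned long sum = 0;
	int i;

	for_each_possible_cpu(i)
		sum += nr_iowait_cpu(i);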
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -154,14 +154,14 @@ static void tick_nohz_update_jiffies(ktime_t now)
  * Updates the per cpu time idle statistics counters
  */
 static void
-update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time)
+update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
 {
 	ktime_t delta;
 
 	if (ts->idle_active) {
 		delta = ktime_sub(now, ts->idle_entrytime);
 		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
-		if (nr_iowait_cpu() > 0)
+		if (nr_iowait_cpu(cpu) > 0)
 			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
 		ts->idle_entrytime = now;
 	}
@@ -175,19 +175,19 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
-	update_ts_time_stats(ts, now, NULL);
+	update_ts_time_stats(cpu, ts, now, NULL);
 	ts->idle_active = 0;
 
 	sched_clock_idle_wakeup_event(0);
 }
 
-static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
+static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
 {
 	ktime_t now;
 
 	now = ktime_get();
 
-	update_ts_time_stats(ts, now, NULL);
+	update_ts_time_stats(cpu, ts, now, NULL);
 
 	ts->idle_entrytime = now;
 	ts->idle_active = 1;
@@ -216,7 +216,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 	if (!tick_nohz_enabled)
 		return -1;
 
-	update_ts_time_stats(ts, ktime_get(), last_update_time);
+	update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
 
 	return ktime_to_us(ts->idle_sleeptime);
 }
@@ -242,7 +242,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 	if (!tick_nohz_enabled)
 		return -1;
 
-	update_ts_time_stats(ts, ktime_get(), last_update_time);
+	update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
 
 	return ktime_to_us(ts->iowait_sleeptime);
 }
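get_cpu_idle_time_us() and get_cpu_iowait_time_us() already take an
explicit cpu and may be called for any cpu from preemptible context;
threading that cpu down into update_ts_time_stats() keeps the iowait
test attributed to the cpu whose tick_sched is being updated, not
whichever cpu the caller happens to be running on.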
@@ -284,7 +284,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	 */
 	ts->inidle = 1;
 
-	now = tick_nohz_start_idle(ts);
+	now = tick_nohz_start_idle(cpu, ts);
 
 	/*
 	 * If this cpu is offline and it is the one which updates