timers: more consistently use clock vs timer
While reviewing the manpages, I noticed I'd missed some clock vs timer sites.

Make sure that all timer functions call cpu_timer_sample_group() and not cpu_clock_sample_group(). This ensures that we enable the process-wide timer in time, and therefore pay the O(n) thread-group cost from the syscall.

Not doing it here will result in the first jiffy tick after setting the timer doing this work instead, resulting in a very expensive tick (but only once) and a delay in actually starting the timer.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 37bed90094
commit 3997ad317f
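For context, the call sites below are in kernel/posix-cpu-timers.c. The difference between the two helpers is that cpu_timer_sample_group() samples through thread_group_cputimer(), which turns on the shared per-process cputimer, while cpu_clock_sample_group() goes through thread_group_cputime(), which only sums over the thread group. A paraphrased sketch of thread_group_cputimer() from this era of the kernel (not part of this diff; details approximate):

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
        struct task_cputime sum;
        unsigned long flags;

        spin_lock_irqsave(&cputimer->lock, flags);
        if (!cputimer->running) {
                /*
                 * First user: enable the shared process-wide counter and pay
                 * the O(n) walk over all threads once, right here.
                 */
                cputimer->running = 1;
                thread_group_cputime(tsk, &sum);
                cputimer->cputime = sum;
        }
        *times = cputimer->cputime;
        spin_unlock_irqrestore(&cputimer->lock, flags);
}

Because cpu_clock_sample_group() never sets cputimer->running, using it on the timer paths defers that enable (and the O(n) walk) to the first tick after the timer is armed, which is exactly the expensive tick the changelog describes.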
@@ -680,6 +680,33 @@ static void cpu_timer_fire(struct k_itimer *timer)
         }
 }
 
+/*
+ * Sample a process (thread group) timer for the given group_leader task.
+ * Must be called with tasklist_lock held for reading.
+ */
+static int cpu_timer_sample_group(const clockid_t which_clock,
+                                  struct task_struct *p,
+                                  union cpu_time_count *cpu)
+{
+        struct task_cputime cputime;
+
+        thread_group_cputimer(p, &cputime);
+        switch (CPUCLOCK_WHICH(which_clock)) {
+        default:
+                return -EINVAL;
+        case CPUCLOCK_PROF:
+                cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+                break;
+        case CPUCLOCK_VIRT:
+                cpu->cpu = cputime.utime;
+                break;
+        case CPUCLOCK_SCHED:
+                cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
+                break;
+        }
+        return 0;
+}
+
 /*
  * Guts of sys_timer_settime for CPU timers.
  * This is called with the timer locked and interrupts disabled.
@@ -741,7 +768,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
         if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                 cpu_clock_sample(timer->it_clock, p, &val);
         } else {
-                cpu_clock_sample_group(timer->it_clock, p, &val);
+                cpu_timer_sample_group(timer->it_clock, p, &val);
         }
 
         if (old) {
@@ -889,7 +916,7 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
                         read_unlock(&tasklist_lock);
                         goto dead;
                 } else {
-                        cpu_clock_sample_group(timer->it_clock, p, &now);
+                        cpu_timer_sample_group(timer->it_clock, p, &now);
                         clear_dead = (unlikely(p->exit_state) &&
                                       thread_group_empty(p));
                 }
@@ -1244,7 +1271,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                         clear_dead_task(timer, now);
                         goto out_unlock;
                 }
-                cpu_clock_sample_group(timer->it_clock, p, &now);
+                cpu_timer_sample_group(timer->it_clock, p, &now);
                 bump_cpu_timer(timer, now);
                 /* Leave the tasklist_lock locked for the call below. */
         }
@@ -1408,33 +1435,6 @@ void run_posix_cpu_timers(struct task_struct *tsk)
         }
 }
 
-/*
- * Sample a process (thread group) timer for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
- */
-static int cpu_timer_sample_group(const clockid_t which_clock,
-                                  struct task_struct *p,
-                                  union cpu_time_count *cpu)
-{
-        struct task_cputime cputime;
-
-        thread_group_cputimer(p, &cputime);
-        switch (CPUCLOCK_WHICH(which_clock)) {
-        default:
-                return -EINVAL;
-        case CPUCLOCK_PROF:
-                cpu->cpu = cputime_add(cputime.utime, cputime.stime);
-                break;
-        case CPUCLOCK_VIRT:
-                cpu->cpu = cputime.utime;
-                break;
-        case CPUCLOCK_SCHED:
-                cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
-                break;
-        }
-        return 0;
-}
-
 /*
  * Set one of the process-wide special case CPU timers.
  * The tsk->sighand->siglock must be held by the caller.
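The path that now pays the cost up front is timer_settime() on a process CPU-time clock; posix_cpu_timer_set() above is the kernel side of that syscall. A hypothetical userspace illustration of arming such a timer (not part of this commit; names and values are illustrative only):

/*
 * Arm a process-wide CPU timer.  After this change, the O(n) walk over the
 * thread group happens inside timer_settime(), not on the next jiffy tick.
 * Compile with: gcc demo.c -lrt   (the -lrt is only needed on older glibc)
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        timer_t timerid;
        struct sigevent sev = {
                .sigev_notify = SIGEV_SIGNAL,
                .sigev_signo  = SIGALRM,        /* default action terminates us */
        };
        struct itimerspec its = {
                .it_value.tv_sec = 1,           /* fire after 1s of process CPU time */
        };

        /* CLOCK_PROCESS_CPUTIME_ID makes this a thread-group-wide CPU timer. */
        if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &timerid) == -1) {
                perror("timer_create");
                return 1;
        }

        /* This settime() is where the kernel now samples via the timer helper. */
        if (timer_settime(timerid, 0, &its, NULL) == -1) {
                perror("timer_settime");
                return 1;
        }

        /* Burn CPU until the timer expires and SIGALRM terminates the process. */
        for (volatile unsigned long i = 0; ; i++)
                ;
}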