sched: Tweak sched_latency and min_granularity
Allow LAST_BUDDY to kick in sooner, improving cache utilization as soon as
a second buddy pair arrives on scene. The cost is that latency starts to
climb sooner; the benefit for tbench 8 on my Q6600 box is ~2%. No
detrimental effects noted in normal desktop usage.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268301285.6785.34.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 21406928af
parent a64692a3af
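The new numbers are self-consistent: sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity, so the new defaults give 6 ms / 2 ms = 3 (previously 5 ms / 1 ms = 5). That quotient is the run-queue depth at which the scheduling period starts stretching beyond sysctl_sched_latency, and, per the changelog, it is also the threshold that lets LAST_BUDDY kick in sooner. Note that these are the normalized one-CPU values; per the "1 + ilog(ncpus)" comment in the hunk below, the effective values scale logarithmically with CPU count (e.g. a factor of 1 + ilog2(4) = 3 on a 4-CPU box, i.e. an effective latency of 18 ms).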
@@ -35,8 +35,8 @@
  * (to see the precise effective timeslice length of your workload,
  *  run vmstat and monitor the context-switches (cs) field)
  */
-unsigned int sysctl_sched_latency = 5000000ULL;
-unsigned int normalized_sysctl_sched_latency = 5000000ULL;
+unsigned int sysctl_sched_latency = 6000000ULL;
+unsigned int normalized_sysctl_sched_latency = 6000000ULL;
 
 /*
  * The initial- and re-scaling of tunables is configurable
@@ -52,15 +52,15 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling
 
 /*
  * Minimal preemption granularity for CPU-bound tasks:
- * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_min_granularity = 1000000ULL;
-unsigned int normalized_sysctl_sched_min_granularity = 1000000ULL;
+unsigned int sysctl_sched_min_granularity = 2000000ULL;
+unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL;
 
 /*
  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  */
-static unsigned int sched_nr_latency = 5;
+static unsigned int sched_nr_latency = 3;
 
 /*
  * After fork, child runs first. If set to 0 (default) then
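For context, the interaction of the three tunables can be modeled in user space. The following standalone program is an illustrative sketch, not kernel code: it mirrors the era's __sched_period() logic, in which all runnable tasks share one sysctl_sched_latency period until nr_running exceeds sched_nr_latency, after which the period stretches so that no task's slice falls below sysctl_sched_min_granularity. The per-task slice shown assumes equal nice-0 weights, and the values are the normalized one-CPU defaults.

#include <stdio.h>

/* Post-commit defaults, in nanoseconds (pre-commit values in comments). */
static const unsigned long long sysctl_sched_latency         = 6000000ULL; /* was 5000000ULL */
static const unsigned long long sysctl_sched_min_granularity = 2000000ULL; /* was 1000000ULL */
static const unsigned int       sched_nr_latency             = 3;          /* was 5 */

/*
 * Model of __sched_period(): few tasks share one latency period;
 * beyond sched_nr_latency tasks, the period grows linearly so each
 * task still receives at least min_granularity.
 */
static unsigned long long sched_period(unsigned long nr_running)
{
	if (nr_running > sched_nr_latency)
		return sysctl_sched_min_granularity * nr_running;
	return sysctl_sched_latency;
}

int main(void)
{
	for (unsigned long nr = 1; nr <= 6; nr++)
		printf("nr_running=%lu  period=%llu ns  slice=%llu ns\n",
		       nr, sched_period(nr), sched_period(nr) / nr);
	return 0;
}

With the new defaults the period begins stretching at four runnable tasks instead of six, and every slice bottoms out at the new 2 ms floor.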