Merge back earlier cpuidle updates for v5.13.
commit 71f4dd3441
drivers/cpuidle/driver.c
@@ -181,9 +181,13 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
 		 */
 		if (s->target_residency > 0)
 			s->target_residency_ns = s->target_residency * NSEC_PER_USEC;
+		else if (s->target_residency_ns < 0)
+			s->target_residency_ns = 0;
 
 		if (s->exit_latency > 0)
 			s->exit_latency_ns = s->exit_latency * NSEC_PER_USEC;
+		else if (s->exit_latency_ns < 0)
+			s->exit_latency_ns = 0;
 	}
 }
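The rule encoded above: a state's microsecond value, when provided, is converted to nanoseconds; otherwise a negative nanosecond value, which the now-signed field can hold, is treated as unset and clamped to zero. A minimal userspace sketch of that normalization (the struct and function below are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000LL

/* Illustrative stand-in for the relevant cpuidle_state fields. */
struct idle_params {
	unsigned int target_residency;	/* microseconds; 0 means "not provided" */
	int64_t target_residency_ns;	/* nanoseconds; negative means "not set" */
};

/* Mirrors the normalization idea from __cpuidle_driver_init(). */
static void normalize(struct idle_params *p)
{
	if (p->target_residency > 0)
		p->target_residency_ns = (int64_t)p->target_residency * NSEC_PER_USEC;
	else if (p->target_residency_ns < 0)
		p->target_residency_ns = 0;
}

int main(void)
{
	struct idle_params a = { .target_residency = 600, .target_residency_ns = -1 };
	struct idle_params b = { .target_residency = 0, .target_residency_ns = -1 };

	normalize(&a);
	normalize(&b);
	printf("%lld %lld\n", (long long)a.target_residency_ns,
	       (long long)b.target_residency_ns);	/* prints: 600000 0 */
	return 0;
}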
drivers/cpuidle/governors/menu.c
@@ -271,7 +271,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 	u64 predicted_ns;
 	u64 interactivity_req;
 	unsigned long nr_iowaiters;
-	ktime_t delta_next;
+	ktime_t delta, delta_tick;
 	int i, idx;
 
 	if (data->needs_update) {
@@ -280,7 +280,12 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 	}
 
 	/* determine the expected residency time, round up */
-	data->next_timer_ns = tick_nohz_get_sleep_length(&delta_next);
+	delta = tick_nohz_get_sleep_length(&delta_tick);
+	if (unlikely(delta < 0)) {
+		delta = 0;
+		delta_tick = 0;
+	}
+	data->next_timer_ns = delta;
 
 	nr_iowaiters = nr_iowait_cpu(dev->cpu);
 	data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);
@@ -318,7 +323,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		 * state selection.
 		 */
 		if (predicted_ns < TICK_NSEC)
-			predicted_ns = delta_next;
+			predicted_ns = data->next_timer_ns;
 	} else {
 		/*
 		 * Use the performance multiplier and the user-configurable
@@ -377,7 +382,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 			 * stuck in the shallow one for too long.
 			 */
 			if (drv->states[idx].target_residency_ns < TICK_NSEC &&
-			    s->target_residency_ns <= delta_next)
+			    s->target_residency_ns <= delta_tick)
 				idx = i;
 
 			return idx;
@@ -399,7 +404,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 	     predicted_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
 		*stop_tick = false;
 
-		if (idx > 0 && drv->states[idx].target_residency_ns > delta_next) {
+		if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick) {
 			/*
 			 * The tick is not going to be stopped and the target
 			 * residency of the state to be returned is not within
@@ -411,7 +416,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 					continue;
 
 				idx = i;
-				if (drv->states[i].target_residency_ns <= delta_next)
+				if (drv->states[i].target_residency_ns <= delta_tick)
 					break;
 			}
 		}
	}
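With timers that may already have expired, tick_nohz_get_sleep_length() can now hand back a negative value, both as its return value and through its out-parameter, so menu clamps both to zero before using them as durations. A standalone sketch of that pattern, not the kernel code itself, assuming only that ktime_t is a signed 64-bit nanosecond count:

#include <stdint.h>
#include <stdio.h>

typedef int64_t ktime_t;	/* signed 64-bit nanosecond count */

/*
 * Clamp a possibly negative sleep length and its tick-bound variant to zero
 * before they are used as durations, mirroring the pattern in menu_select().
 */
static ktime_t clamp_sleep_length(ktime_t delta, ktime_t *delta_tick)
{
	if (delta < 0) {
		delta = 0;
		*delta_tick = 0;
	}
	return delta;
}

int main(void)
{
	ktime_t delta = -250000;	/* made-up: the next timer already expired */
	ktime_t delta_tick = -250000;
	ktime_t next_timer_ns = clamp_sleep_length(delta, &delta_tick);

	printf("%lld %lld\n", (long long)next_timer_ns, (long long)delta_tick);	/* 0 0 */
	return 0;
}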
drivers/cpuidle/governors/teo.c
@@ -100,8 +100,8 @@ struct teo_idle_state {
  * @intervals: Saved idle duration values.
  */
 struct teo_cpu {
-	u64 time_span_ns;
-	u64 sleep_length_ns;
+	s64 time_span_ns;
+	s64 sleep_length_ns;
 	struct teo_idle_state states[CPUIDLE_STATE_MAX];
 	int interval_idx;
 	u64 intervals[INTERVALS];
@@ -117,7 +117,8 @@ static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
 static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
 	struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
-	int i, idx_hit = -1, idx_timer = -1;
+	int i, idx_hit = 0, idx_timer = 0;
+	unsigned int hits, misses;
 	u64 measured_ns;
 
 	if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {
@@ -174,25 +175,22 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	 * also increase the "early hits" metric for the state that actually
 	 * matches the measured idle duration.
 	 */
-	if (idx_timer >= 0) {
-		unsigned int hits = cpu_data->states[idx_timer].hits;
-		unsigned int misses = cpu_data->states[idx_timer].misses;
+	hits = cpu_data->states[idx_timer].hits;
+	hits -= hits >> DECAY_SHIFT;
 
-		hits -= hits >> DECAY_SHIFT;
-		misses -= misses >> DECAY_SHIFT;
+	misses = cpu_data->states[idx_timer].misses;
+	misses -= misses >> DECAY_SHIFT;
 
-		if (idx_timer > idx_hit) {
-			misses += PULSE;
-			if (idx_hit >= 0)
-				cpu_data->states[idx_hit].early_hits += PULSE;
-		} else {
-			hits += PULSE;
-		}
+	if (idx_timer == idx_hit) {
+		hits += PULSE;
+	} else {
+		misses += PULSE;
+		cpu_data->states[idx_hit].early_hits += PULSE;
+	}
 
-		cpu_data->states[idx_timer].misses = misses;
-		cpu_data->states[idx_timer].hits = hits;
-	}
+	cpu_data->states[idx_timer].misses = misses;
+	cpu_data->states[idx_timer].hits = hits;
 
 	/*
 	 * Save idle duration values corresponding to non-timer wakeups for
 	 * pattern detection.
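The hits/misses bookkeeping above is a pair of decaying counters: on every update each counter loses a 2^-DECAY_SHIFT fraction of itself and the one matching the outcome gains PULSE, so a counter that is reinforced on every wakeup converges toward PULSE << DECAY_SHIFT. A small standalone sketch of that arithmetic (constants chosen to mirror the governor's idea, illustrative here):

#include <stdio.h>

#define DECAY_SHIFT	3
#define PULSE		1024

int main(void)
{
	unsigned int hits = 0, misses = 0;
	int i;

	for (i = 0; i < 100; i++) {
		/* every wakeup both counters decay by 1/8th ... */
		hits -= hits >> DECAY_SHIFT;
		misses -= misses >> DECAY_SHIFT;
		/* ... and the matching one gets a pulse (pretend every wakeup is a "hit") */
		hits += PULSE;
	}

	printf("hits=%u misses=%u (limit %u)\n", hits, misses, PULSE << DECAY_SHIFT);
	return 0;
}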
@@ -216,7 +214,7 @@ static bool teo_time_ok(u64 interval_ns)
  */
 static int teo_find_shallower_state(struct cpuidle_driver *drv,
 				    struct cpuidle_device *dev, int state_idx,
-				    u64 duration_ns)
+				    s64 duration_ns)
 {
 	int i;
 
@@ -242,10 +240,10 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 {
 	struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
 	s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
-	u64 duration_ns;
+	int max_early_idx, prev_max_early_idx, constraint_idx, idx0, idx, i;
 	unsigned int hits, misses, early_hits;
-	int max_early_idx, prev_max_early_idx, constraint_idx, idx, i;
 	ktime_t delta_tick;
+	s64 duration_ns;
 
 	if (dev->last_state_idx >= 0) {
 		teo_update(drv, dev);
@@ -264,6 +262,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 	prev_max_early_idx = -1;
 	constraint_idx = drv->state_count;
 	idx = -1;
+	idx0 = idx;
 
 	for (i = 0; i < drv->state_count; i++) {
 		struct cpuidle_state *s = &drv->states[i];
@@ -324,6 +323,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 			idx = i; /* first enabled state */
 			hits = cpu_data->states[i].hits;
 			misses = cpu_data->states[i].misses;
+			idx0 = i;
 		}
 
 		if (s->target_residency_ns > duration_ns)
@@ -376,11 +376,16 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 
 	if (idx < 0) {
 		idx = 0; /* No states enabled. Must use 0. */
-	} else if (idx > 0) {
+	} else if (idx > idx0) {
 		unsigned int count = 0;
 		u64 sum = 0;
 
 		/*
+		 * The target residencies of at least two different enabled idle
+		 * states are less than or equal to the current expected idle
+		 * duration. Try to refine the selection using the most recent
+		 * measured idle duration values.
+		 *
 		 * Count and sum the most recent idle duration values less than
 		 * the current expected idle duration value.
 		 */
@@ -428,7 +433,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		 * till the closest timer including the tick, try to correct
 		 * that.
 		 */
-		if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick)
+		if (idx > idx0 &&
+		    drv->states[idx].target_residency_ns > delta_tick)
 			idx = teo_find_shallower_state(drv, dev, idx, delta_tick);
 	}
 
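The idx0 variable introduced above records the index of the shallowest enabled state, and the checks that used to read idx > 0 now read idx > idx0, so the refinement and tick-correction paths never try to drop below the first state that is actually enabled. A toy sketch of that floor, not the governor's teo_find_shallower_state() (which also checks target residencies); names and data here are illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Look for a shallower enabled state, but never below the idx0 floor. */
static int find_shallower(const bool *enabled, int idx, int idx0)
{
	int i;

	for (i = idx - 1; i >= idx0; i--)
		if (enabled[i])
			return i;

	return idx;	/* nothing shallower is enabled */
}

int main(void)
{
	/* hypothetical state table: states 0 and 1 disabled, 2 and 3 enabled */
	bool enabled[] = { false, false, true, true };
	int idx0 = 2;	/* shallowest enabled state */

	printf("%d\n", find_shallower(enabled, 3, idx0));	/* 2 */
	printf("%d\n", find_shallower(enabled, 2, idx0));	/* 2: cannot go below idx0 */
	return 0;
}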
drivers/idle/intel_idle.c
@@ -744,8 +744,8 @@ static struct cpuidle_state icx_cstates[] __initdata = {
 		.name = "C6",
 		.desc = "MWAIT 0x20",
 		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
-		.exit_latency = 128,
-		.target_residency = 384,
+		.exit_latency = 170,
+		.target_residency = 600,
 		.enter = &intel_idle,
 		.enter_s2idle = intel_idle_s2idle, },
 	{
@@ -1156,6 +1156,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,		&idle_cpu_skl),
 	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,		&idle_cpu_skx),
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,		&idle_cpu_icx),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,		&idle_cpu_icx),
 	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,	&idle_cpu_knl),
 	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,	&idle_cpu_knl),
 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&idle_cpu_bxt),
include/linux/cpuidle.h
@@ -49,8 +49,8 @@ struct cpuidle_state {
 	char		name[CPUIDLE_NAME_LEN];
 	char		desc[CPUIDLE_DESC_LEN];
 
-	u64		exit_latency_ns;
-	u64		target_residency_ns;
+	s64		exit_latency_ns;
+	s64		target_residency_ns;
 	unsigned int	flags;
 	unsigned int	exit_latency; /* in US */
 	int		power_usage; /* in mW */
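Switching these fields from u64 to s64 matters because they are now compared against possibly negative ktime_t sleep lengths; with an unsigned field the comparison would be done in unsigned arithmetic and a negative delta would look like an enormous duration. A small sketch of that pitfall (made-up values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t delta_tick = -1000;		/* a negative "sleep length" */
	uint64_t residency_u64 = 500000;	/* 500 us state, unsigned field */
	int64_t residency_s64 = 500000;		/* same value, signed field */

	/*
	 * With a u64 field the comparison happens in unsigned arithmetic:
	 * -1000 converts to 0xFFFFFFFFFFFFFC18, so the state wrongly looks
	 * like it fits.  With s64 the comparison behaves as intended.
	 */
	printf("u64: %d\n", residency_u64 <= (uint64_t)delta_tick);	/* 1 (wrong) */
	printf("s64: %d\n", residency_s64 <= delta_tick);		/* 0 (right) */
	return 0;
}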
kernel/time/tick-sched.c
@@ -1124,7 +1124,11 @@ ktime_t tick_nohz_get_next_hrtimer(void)
  * tick_nohz_get_sleep_length - return the expected length of the current sleep
  * @delta_next: duration until the next event if the tick cannot be stopped
  *
- * Called from power state control code with interrupts disabled
+ * Called from power state control code with interrupts disabled.
+ *
+ * The return value of this function and/or the value returned by it through the
+ * @delta_next pointer can be negative which must be taken into account by its
+ * callers.
  */
 ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
 {