Merge branch 'cpuidle' into release-2.6.27
commit 22d9aac235
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -67,10 +67,17 @@ static int ladder_select_state(struct cpuidle_device *dev)
 	struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
 	struct ladder_device_state *last_state;
 	int last_residency, last_idx = ldev->last_state_idx;
+	int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
 
 	if (unlikely(!ldev))
 		return 0;
 
+	/* Special case when user has set very strict latency requirement */
+	if (unlikely(latency_req == 0)) {
+		ladder_do_selection(ldev, last_idx, 0);
+		return 0;
+	}
+
 	last_state = &ldev->states[last_idx];
 
 	if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
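The latency_req read at the top of ladder_select_state() is the PM QoS CPU DMA latency requirement. Userspace can pin that requirement by holding /dev/cpu_dma_latency open and writing a 32-bit microsecond value; the request stays in force until the file descriptor is closed. A minimal, hypothetical userspace sketch (not part of this commit) that would exercise the new latency_req == 0 fast path:

/* Illustrative only: request 0 us CPU DMA latency so the governors
 * take the new "strict latency" fast path. The requirement is held
 * for as long as the file descriptor stays open.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int32_t latency_us = 0;
	int fd = open("/dev/cpu_dma_latency", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/cpu_dma_latency");
		return 1;
	}
	if (write(fd, &latency_us, sizeof(latency_us)) != sizeof(latency_us)) {
		perror("write");
		close(fd);
		return 1;
	}
	pause();	/* keep the requirement active until killed */
	close(fd);
	return 0;
}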
@@ -81,8 +88,7 @@ static int ladder_select_state(struct cpuidle_device *dev)
 	/* consider promotion */
 	if (last_idx < dev->state_count - 1 &&
 	    last_residency > last_state->threshold.promotion_time &&
-	    dev->states[last_idx + 1].exit_latency <=
-			pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
+	    dev->states[last_idx + 1].exit_latency <= latency_req) {
 		last_state->stats.promotion_count++;
 		last_state->stats.demotion_count = 0;
 		if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
@@ -92,7 +98,19 @@ static int ladder_select_state(struct cpuidle_device *dev)
 	}
 
 	/* consider demotion */
-	if (last_idx > 0 &&
+	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
+	    dev->states[last_idx].exit_latency > latency_req) {
+		int i;
+
+		for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
+			if (dev->states[i].exit_latency <= latency_req)
+				break;
+		}
+		ladder_do_selection(ldev, last_idx, i);
+		return i;
+	}
+
+	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
 	    last_residency < last_state->threshold.demotion_time) {
 		last_state->stats.demotion_count++;
 		last_state->stats.promotion_count = 0;
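The new demotion branch above handles the case where the currently selected state's exit latency no longer fits the QoS requirement: instead of stepping down one rung, the ladder drops directly to the deepest state that still satisfies the constraint. A standalone toy model of that search (names and types are illustrative, not the kernel's):

/* Toy model of the new ladder demotion rule: starting below last_idx,
 * find the deepest state whose exit latency fits latency_req, falling
 * back to the first non-poll state if none does.
 */
#include <stdio.h>

#define STATE_START 1	/* stand-in for CPUIDLE_DRIVER_STATE_START */

static int fit_state(const int *exit_latency, int last_idx, int latency_req)
{
	int i;

	for (i = last_idx - 1; i > STATE_START; i--) {
		if (exit_latency[i] <= latency_req)
			break;
	}
	return i;
}

int main(void)
{
	int exit_latency[] = { 0, 1, 20, 100, 800 };	/* per-state, in us */

	/* In state 4 (800 us exit latency) while QoS only allows 50 us. */
	printf("demote to state %d\n", fit_state(exit_latency, 4, 50));	/* -> 2 */
	return 0;
}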
@@ -117,7 +135,7 @@ static int ladder_enable_device(struct cpuidle_device *dev)
 	struct ladder_device_state *lstate;
 	struct cpuidle_state *state;
 
-	ldev->last_state_idx = 0;
+	ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
 
 	for (i = 0; i < dev->state_count; i++) {
 		state = &dev->states[i];
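The 0 to CPUIDLE_DRIVER_STATE_START changes in this file account for architectures that expose a polling state at index 0, which the ladder should not treat as a real C-state. From memory of include/linux/cpuidle.h in this era (worth verifying against the tree), the constant is simply:

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
#define CPUIDLE_DRIVER_STATE_START	1
#else
#define CPUIDLE_DRIVER_STATE_START	0
#endif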
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -34,21 +34,28 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices);
 static int menu_select(struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
+	int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
 	int i;
 
+	/* Special case when user has set very strict latency requirement */
+	if (unlikely(latency_req == 0)) {
+		data->last_state_idx = 0;
+		return 0;
+	}
+
 	/* determine the expected residency time */
 	data->expected_us =
 		(u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
 
 	/* find the deepest idle state that satisfies our constraints */
-	for (i = 1; i < dev->state_count; i++) {
+	for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
 		struct cpuidle_state *s = &dev->states[i];
 
 		if (s->target_residency > data->expected_us)
 			break;
 		if (s->target_residency > data->predicted_us)
 			break;
-		if (s->exit_latency > pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY))
+		if (s->exit_latency > latency_req)
 			break;
 	}
 
@@ -67,9 +74,9 @@ static void menu_reflect(struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
 	int last_idx = data->last_state_idx;
-	unsigned int measured_us =
-		cpuidle_get_last_residency(dev) + data->elapsed_us;
+	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
 	struct cpuidle_state *target = &dev->states[last_idx];
+	unsigned int measured_us;
 
 	/*
 	 * Ugh, this idle state doesn't support residency measurements, so we
@@ -77,20 +84,27 @@ static void menu_reflect(struct cpuidle_device *dev)
 	 * for one full standard timer tick. However, be aware that this
 	 * could potentially result in a suboptimal state transition.
 	 */
-	if (!(target->flags & CPUIDLE_FLAG_TIME_VALID))
-		measured_us = USEC_PER_SEC / HZ;
+	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
+		last_idle_us = USEC_PER_SEC / HZ;
 
-	/* Predict time remaining until next break event */
-	if (measured_us + BREAK_FUZZ < data->expected_us - target->exit_latency) {
-		data->predicted_us = max(measured_us, data->last_measured_us);
+	/*
+	 * measured_us and elapsed_us are the cumulative idle time, since the
+	 * last time we were woken out of idle by an interrupt.
+	 */
+	if (data->elapsed_us <= data->elapsed_us + last_idle_us)
+		measured_us = data->elapsed_us + last_idle_us;
+	else
+		measured_us = -1;
+
+	/* Predict time until next break event */
+	data->predicted_us = max(measured_us, data->last_measured_us);
+
+	if (last_idle_us + BREAK_FUZZ <
+			data->expected_us - target->exit_latency) {
 		data->last_measured_us = measured_us;
 		data->elapsed_us = 0;
 	} else {
-		if (data->elapsed_us < data->elapsed_us + measured_us)
-			data->elapsed_us = measured_us;
-		else
-			data->elapsed_us = -1;
-		data->predicted_us = max(measured_us, data->last_measured_us);
+		data->elapsed_us = measured_us;
 	}
 }
 
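In the new menu_reflect(), elapsed_us accumulates residency across wakeups that were not real break events, so data->elapsed_us + last_idle_us can in principle wrap the unsigned counter. The test if (data->elapsed_us <= data->elapsed_us + last_idle_us) is an overflow check, and measured_us is clamped to UINT_MAX (written as -1) rather than being allowed to wrap. A minimal sketch of the same saturating-add idiom, with hypothetical names:

/* Saturating unsigned add, the same idiom menu_reflect() now uses:
 * clamp to UINT_MAX instead of wrapping around.
 */
#include <limits.h>
#include <stdio.h>

static unsigned int sat_add_us(unsigned int elapsed, unsigned int last_idle)
{
	if (elapsed <= elapsed + last_idle)	/* sum did not wrap */
		return elapsed + last_idle;
	return -1;				/* converts to UINT_MAX */
}

int main(void)
{
	printf("%u\n", sat_add_us(100, 250));		/* 350 */
	printf("%u\n", sat_add_us(UINT_MAX - 5, 10));	/* 4294967295 */
	return 0;
}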