cpufreq: ondemand: call dbs_check_cpu only when necessary

Modify the ondemand timer to avoid resampling CPU utilization if it has
recently been sampled on another SW coordinated core.

Signed-off-by: Fabio Baltieri <fabio.baltieri@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Author: Fabio Baltieri
Date: 2012-12-27 14:55:40 +00:00
Committed by: Rafael J. Wysocki
parent 2abfa876f1
commit da53d61e21
3 changed files with 52 additions and 10 deletions
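The patch boils down to a per-group rate limit: software-coordinated CPUs share one time stamp, and a timer expiry that lands within half a sampling period of that stamp skips the load evaluation. Below is a minimal user-space sketch of that check; the names SAMPLING_RATE_US, group_stamp_us and should_sample() are illustrative only and are not taken from the kernel source, which does the equivalent test in od_timer_coordinated().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SAMPLING_RATE_US 10000	/* assumed sampling_rate tunable, in microseconds */

/* One shared time stamp per SW coordinated group, like cdbs.time_stamp. */
static int64_t group_stamp_us;

/*
 * Decide whether this timer expiry should evaluate CPU load, mirroring the
 * delta_us < sampling_rate / 2 test done in od_timer_coordinated().
 */
static bool should_sample(int64_t now_us)
{
	int64_t delta_us = now_us - group_stamp_us;

	if (delta_us < SAMPLING_RATE_US / 2)
		return false;		/* another core sampled recently: skip */

	group_stamp_us = now_us;	/* record this sample for the whole group */
	return true;
}

int main(void)
{
	/* Two cores of one group expiring 1 ms apart, twice. */
	const int64_t expiries[] = { 10000, 11000, 20000, 21000 };

	for (unsigned int i = 0; i < sizeof(expiries) / sizeof(expiries[0]); i++)
		printf("t=%5lld us -> %s\n", (long long)expiries[i],
		       should_sample(expiries[i]) ? "sample" : "skip");
	return 0;
}

With these values only the first expiry in each pair evaluates the load ("sample"), while the second one, arriving 1 ms later on the other core, is skipped, which is the behaviour the patch introduces for coordinated cores.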

drivers/cpufreq/cpufreq_governor.c

@@ -290,6 +290,9 @@ second_time:
 		mutex_unlock(&dbs_data->mutex);
 
 		if (dbs_sw_coordinated_cpus(cpu_cdbs)) {
+			/* Initiate timer time stamp */
+			cpu_cdbs->time_stamp = ktime_get();
+
 			for_each_cpu(j, policy->cpus) {
 				struct cpu_dbs_common_info *j_cdbs;
 
drivers/cpufreq/cpufreq_governor.h

@@ -82,6 +82,7 @@ struct cpu_dbs_common_info {
 	 * the governor or limits.
 	 */
 	struct mutex timer_mutex;
+	ktime_t time_stamp;
 };
 
 struct od_cpu_dbs_info_s {

drivers/cpufreq/cpufreq_ondemand.c

@@ -216,22 +216,22 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
 	}
 }
 
-static void od_dbs_timer(struct work_struct *work)
+static void od_timer_update(struct od_cpu_dbs_info_s *dbs_info, bool sample,
+			    struct delayed_work *dw)
 {
-	struct od_cpu_dbs_info_s *dbs_info =
-		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
 	unsigned int cpu = dbs_info->cdbs.cpu;
 	int delay, sample_type = dbs_info->sample_type;
 
-	mutex_lock(&dbs_info->cdbs.timer_mutex);
-
 	/* Common NORMAL_SAMPLE setup */
 	dbs_info->sample_type = OD_NORMAL_SAMPLE;
 	if (sample_type == OD_SUB_SAMPLE) {
 		delay = dbs_info->freq_lo_jiffies;
-		__cpufreq_driver_target(dbs_info->cdbs.cur_policy,
-			dbs_info->freq_lo, CPUFREQ_RELATION_H);
+		if (sample)
+			__cpufreq_driver_target(dbs_info->cdbs.cur_policy,
+						dbs_info->freq_lo,
+						CPUFREQ_RELATION_H);
 	} else {
-		dbs_check_cpu(&od_dbs_data, cpu);
+		if (sample)
+			dbs_check_cpu(&od_dbs_data, cpu);
 		if (dbs_info->freq_lo) {
 			/* Setup timer for SUB_SAMPLE */
@@ -243,11 +243,49 @@ static void od_dbs_timer(struct work_struct *work)
 		}
 	}
 
-	schedule_delayed_work_on(smp_processor_id(), &dbs_info->cdbs.work,
-		delay);
+	schedule_delayed_work_on(smp_processor_id(), dw, delay);
+}
+
+static void od_timer_coordinated(struct od_cpu_dbs_info_s *dbs_info_local,
+				 struct delayed_work *dw)
+{
+	struct od_cpu_dbs_info_s *dbs_info;
+	ktime_t time_now;
+	s64 delta_us;
+	bool sample = true;
+
+	/* use leader CPU's dbs_info */
+	dbs_info = &per_cpu(od_cpu_dbs_info, dbs_info_local->cdbs.cpu);
+	mutex_lock(&dbs_info->cdbs.timer_mutex);
+
+	time_now = ktime_get();
+	delta_us = ktime_us_delta(time_now, dbs_info->cdbs.time_stamp);
+
+	/* Do nothing if we recently have sampled */
+	if (delta_us < (s64)(od_tuners.sampling_rate / 2))
+		sample = false;
+	else
+		dbs_info->cdbs.time_stamp = time_now;
+
+	od_timer_update(dbs_info, sample, dw);
 	mutex_unlock(&dbs_info->cdbs.timer_mutex);
 }
+
+static void od_dbs_timer(struct work_struct *work)
+{
+	struct delayed_work *dw = to_delayed_work(work);
+	struct od_cpu_dbs_info_s *dbs_info =
+		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
+
+	if (dbs_sw_coordinated_cpus(&dbs_info->cdbs)) {
+		od_timer_coordinated(dbs_info, dw);
+	} else {
+		mutex_lock(&dbs_info->cdbs.timer_mutex);
+		od_timer_update(dbs_info, true, dw);
+		mutex_unlock(&dbs_info->cdbs.timer_mutex);
+	}
+}
 
 /************************** sysfs interface ************************/
 
 static ssize_t show_sampling_rate_min(struct kobject *kobj,