[CPUFREQ] ondemand governor restructure the work callback
Restructure the delayed_work callback in ondemand.  This eliminates the
need for smp_processor_id in the callback function and also helps with
proper locking and avoiding flush_workqueue when stopping the governor
(done in a subsequent patch).

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dave Jones <davej@redhat.com>
parent 5a01f2e8f3
commit 529af7a14f
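To make the restructuring concrete, here is a minimal user-space sketch of the
container_of idiom that the reworked do_dbs_timer relies on. The types are
simplified stand-ins, not kernel code and not part of the patch: because the
work item is embedded in the per-CPU bookkeeping struct, the callback can
recover its owning struct from the work pointer alone, so it no longer needs
smp_processor_id() to find its data.

#include <stddef.h>
#include <stdio.h>

/* User-space stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-in for struct work_struct. */
struct work_struct {
	void (*func)(struct work_struct *work);
};

/* Simplified per-CPU bookkeeping with the work item embedded in it. */
struct cpu_dbs_info {
	int cpu;
	struct work_struct work;
};

static void do_dbs_timer(struct work_struct *work)
{
	/* Recover the enclosing struct from the embedded member. */
	struct cpu_dbs_info *dbs_info =
		container_of(work, struct cpu_dbs_info, work);

	printf("timer callback owns cpu %d\n", dbs_info->cpu);
}

int main(void)
{
	struct cpu_dbs_info info = { .cpu = 3 };

	info.work.func = do_dbs_timer;
	info.work.func(&info.work);	/* stands in for the workqueue firing */
	return 0;
}

In the patch itself the same recovery is written as
container_of(work, struct cpu_dbs_info_s, work.work), because the kernel's
delayed_work wraps a work_struct member named work.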
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -52,19 +52,20 @@ static unsigned int def_sampling_rate;
 static void do_dbs_timer(struct work_struct *work);
 
 /* Sampling types */
-enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
+enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
 
 struct cpu_dbs_info_s {
 	cputime64_t prev_cpu_idle;
 	cputime64_t prev_cpu_wall;
 	struct cpufreq_policy *cur_policy;
 	struct delayed_work work;
-	enum dbs_sample sample_type;
-	unsigned int enable;
 	struct cpufreq_frequency_table *freq_table;
 	unsigned int freq_lo;
 	unsigned int freq_lo_jiffies;
 	unsigned int freq_hi_jiffies;
+	int cpu;
+	unsigned int enable:1,
+		     sample_type:1;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
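As a side note on the struct change above, enable and the sample type shrink
from two full-width members to two single-bit fields sharing one unsigned int,
which on a typical ABI leaves the new cpu field fitting in the same footprint
the two old members used. A small user-space illustration (member types
simplified, exact sizes are ABI-dependent):

#include <stdio.h>

/* Before: the flag and the sample type each occupied a full word. */
struct old_layout {
	int sample_type;		/* stand-in for enum dbs_sample */
	unsigned int enable;
};

/* After: both flags packed into single bits, plus the owning CPU id. */
struct new_layout {
	int cpu;
	unsigned int enable:1,
		     sample_type:1;
};

int main(void)
{
	printf("old flags:          %zu bytes\n", sizeof(struct old_layout));
	printf("new flags + cpu id: %zu bytes\n", sizeof(struct new_layout));
	return 0;
}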
@@ -402,7 +403,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 	if (load < (dbs_tuners_ins.up_threshold - 10)) {
 		unsigned int freq_next, freq_cur;
 
-		freq_cur = cpufreq_driver_getavg(policy);
+		freq_cur = __cpufreq_driver_getavg(policy);
 		if (!freq_cur)
 			freq_cur = policy->cur;
 
@@ -423,9 +424,11 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
 static void do_dbs_timer(struct work_struct *work)
 {
-	unsigned int cpu = smp_processor_id();
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
-	enum dbs_sample sample_type = dbs_info->sample_type;
+	struct cpu_dbs_info_s *dbs_info =
+		container_of(work, struct cpu_dbs_info_s, work.work);
+	unsigned int cpu = dbs_info->cpu;
+	int sample_type = dbs_info->sample_type;
+
 	/* We want all CPUs to do sampling nearly on same jiffy */
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 
@@ -454,17 +457,17 @@ static void do_dbs_timer(struct work_struct *work)
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
 }
 
-static inline void dbs_timer_init(unsigned int cpu)
+static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 {
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
 	/* We want all CPUs to do sampling nearly on same jiffy */
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 	delay -= jiffies % delay;
 
 	ondemand_powersave_bias_init();
-	INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
-	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
+	INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
+	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
+			      delay);
 }
 
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -528,6 +531,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
 			j_dbs_info->prev_cpu_wall = get_jiffies_64();
 		}
+		this_dbs_info->cpu = cpu;
 		this_dbs_info->enable = 1;
 		/*
 		 * Start the timerschedule work, when this governor
@@ -548,7 +552,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
 		}
-		dbs_timer_init(policy->cpu);
+		dbs_timer_init(this_dbs_info);
 
 		mutex_unlock(&dbs_mutex);
 		break;
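Putting the hunks together, the post-patch flow looks roughly like the trimmed,
kernel-style sketch below. This is not a literal excerpt and is not compilable
on its own: locking, error handling and the powersave-bias sub-sampling are
omitted, and only identifiers that already appear in the diff (plus the
standard per_cpu() accessor and CPUFREQ_GOV_START) are used.

/* Trimmed sketch of the flow after this patch; not a literal excerpt. */

static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;	/* no smp_processor_id() */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	delay -= jiffies % delay;
	if (!dbs_info->enable)
		return;
	dbs_check_cpu(dbs_info);
	/* re-arm on the CPU this work item belongs to */
	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	delay -= jiffies % delay;
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
			      delay);
}

/* CPUFREQ_GOV_START handling inside cpufreq_governor_dbs(), trimmed. */
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	struct cpu_dbs_info_s *this_dbs_info =
		&per_cpu(cpu_dbs_info, policy->cpu);

	if (event == CPUFREQ_GOV_START) {
		this_dbs_info->cur_policy = policy;
		this_dbs_info->cpu = policy->cpu;	/* remember the owner */
		this_dbs_info->enable = 1;
		dbs_timer_init(this_dbs_info);		/* pins work to ->cpu */
	}
	return 0;
}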