Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq
commit 28e0cf22c1
@@ -96,6 +96,7 @@ config X86_POWERNOW_K8_ACPI
 
 config X86_GX_SUSPMOD
 	tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
+	depends on PCI
 	help
 	 This add the CPUFreq driver for NatSemi Geode processors which
 	 support suspend modulation.
@@ -52,6 +52,7 @@ enum {
 
 
 static int has_N44_O17_errata[NR_CPUS];
+static int has_N60_errata[NR_CPUS];
 static unsigned int stock_freq;
 static struct cpufreq_driver p4clockmod_driver;
 static unsigned int cpufreq_p4_get(unsigned int cpu);
@@ -226,6 +227,12 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 	case 0x0f12:
 		has_N44_O17_errata[policy->cpu] = 1;
 		dprintk("has errata -- disabling low frequencies\n");
+		break;
+
+	case 0x0f29:
+		has_N60_errata[policy->cpu] = 1;
+		dprintk("has errata -- disabling frequencies lower than 2ghz\n");
+		break;
 	}
 
 	/* get max frequency */
@@ -237,6 +244,8 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 	for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
 		if ((i<2) && (has_N44_O17_errata[policy->cpu]))
 			p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+		else if (has_N60_errata[policy->cpu] && p4clockmod_table[i].frequency < 2000000)
+			p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
 		else
 			p4clockmod_table[i].frequency = (stock_freq * i)/8;
 	}
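The two hunks above extend the p4-clockmod driver's errata handling: model 0x0f29 parts (the N60 erratum) must not be clock-modulated below 2 GHz, so those table entries are marked invalid while the remaining steps stay at stock_freq * i / 8. A minimal user-space sketch of that table filter follows; the 2.8 GHz stock frequency and the names are illustrative only, not taken from the driver.

/* Illustrative sketch (not part of the commit): filtering a clock-modulation
 * table the way p4-clockmod does for the N60 erratum. */
#include <stdio.h>

#define ENTRY_INVALID 0

int main(void)
{
        unsigned int stock_freq = 2800000;      /* kHz, assumed stock speed */
        unsigned int table[8];
        int has_n60 = 1;                        /* pretend the erratum applies */
        int i;

        for (i = 1; i < 8; i++) {
                table[i] = (stock_freq * i) / 8;
                /* the erratum forbids running below 2 GHz */
                if (has_n60 && table[i] < 2000000)
                        table[i] = ENTRY_INVALID;
                printf("step %d: %u kHz\n", i, table[i]);
        }
        return 0;
}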
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/completion.h>
+#include <linux/mutex.h>
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg)
 
@@ -55,7 +56,7 @@ static DECLARE_RWSEM (cpufreq_notifier_rwsem);
 
 
 static LIST_HEAD(cpufreq_governor_list);
-static DECLARE_MUTEX (cpufreq_governor_sem);
+static DEFINE_MUTEX (cpufreq_governor_mutex);
 
 struct cpufreq_policy * cpufreq_cpu_get(unsigned int cpu)
 {
@@ -297,18 +298,18 @@ static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
 		return -EINVAL;
 	} else {
 		struct cpufreq_governor *t;
-		down(&cpufreq_governor_sem);
+		mutex_lock(&cpufreq_governor_mutex);
 		if (!cpufreq_driver || !cpufreq_driver->target)
 			goto out;
 		list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
 			if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) {
 				*governor = t;
-				up(&cpufreq_governor_sem);
+				mutex_unlock(&cpufreq_governor_mutex);
 				return 0;
 			}
 		}
 out:
-		up(&cpufreq_governor_sem);
+		mutex_unlock(&cpufreq_governor_mutex);
 	}
 	return -EINVAL;
 }
@@ -600,7 +601,8 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	policy->cpu = cpu;
 	policy->cpus = cpumask_of_cpu(cpu);
 
-	init_MUTEX_LOCKED(&policy->lock);
+	mutex_init(&policy->lock);
+	mutex_lock(&policy->lock);
 	init_completion(&policy->kobj_unregister);
 	INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);
 
@@ -610,6 +612,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	ret = cpufreq_driver->init(policy);
 	if (ret) {
 		dprintk("initialization failed\n");
+		mutex_unlock(&policy->lock);
 		goto err_out;
 	}
 
@@ -621,9 +624,10 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	strlcpy(policy->kobj.name, "cpufreq", KOBJ_NAME_LEN);
 
 	ret = kobject_register(&policy->kobj);
-	if (ret)
+	if (ret) {
+		mutex_unlock(&policy->lock);
 		goto err_out_driver_exit;
-
+	}
 	/* set up files for this cpu device */
 	drv_attr = cpufreq_driver->attr;
 	while ((drv_attr) && (*drv_attr)) {
@@ -641,7 +645,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 	policy->governor = NULL; /* to assure that the starting sequence is
 				  * run in cpufreq_set_policy */
-	up(&policy->lock);
+	mutex_unlock(&policy->lock);
 
 	/* set default policy */
 
@@ -762,10 +766,10 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif
 
-	down(&data->lock);
+	mutex_lock(&data->lock);
 	if (cpufreq_driver->target)
 		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
-	up(&data->lock);
+	mutex_unlock(&data->lock);
 
 	kobject_unregister(&data->kobj);
 
@@ -834,9 +838,9 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
 	unsigned int ret = 0;
 
 	if (policy) {
-		down(&policy->lock);
+		mutex_lock(&policy->lock);
 		ret = policy->cur;
-		up(&policy->lock);
+		mutex_unlock(&policy->lock);
 		cpufreq_cpu_put(policy);
 	}
 
@@ -862,7 +866,7 @@ unsigned int cpufreq_get(unsigned int cpu)
 	if (!cpufreq_driver->get)
 		goto out;
 
-	down(&policy->lock);
+	mutex_lock(&policy->lock);
 
 	ret = cpufreq_driver->get(cpu);
 
@@ -875,7 +879,7 @@ unsigned int cpufreq_get(unsigned int cpu)
 		}
 	}
 
-	up(&policy->lock);
+	mutex_unlock(&policy->lock);
 
 out:
 	cpufreq_cpu_put(policy);
@@ -1158,11 +1162,11 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 	if (!policy)
 		return -EINVAL;
 
-	down(&policy->lock);
+	mutex_lock(&policy->lock);
 
 	ret = __cpufreq_driver_target(policy, target_freq, relation);
 
-	up(&policy->lock);
+	mutex_unlock(&policy->lock);
 
 	cpufreq_cpu_put(policy);
 
@@ -1199,9 +1203,9 @@ int cpufreq_governor(unsigned int cpu, unsigned int event)
 	if (!policy)
 		return -EINVAL;
 
-	down(&policy->lock);
+	mutex_lock(&policy->lock);
 	ret = __cpufreq_governor(policy, event);
-	up(&policy->lock);
+	mutex_unlock(&policy->lock);
 
 	cpufreq_cpu_put(policy);
 
@@ -1217,17 +1221,17 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
 	if (!governor)
 		return -EINVAL;
 
-	down(&cpufreq_governor_sem);
+	mutex_lock(&cpufreq_governor_mutex);
 
 	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
 		if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) {
-			up(&cpufreq_governor_sem);
+			mutex_unlock(&cpufreq_governor_mutex);
 			return -EBUSY;
 		}
 	}
 	list_add(&governor->governor_list, &cpufreq_governor_list);
 
-	up(&cpufreq_governor_sem);
+	mutex_unlock(&cpufreq_governor_mutex);
 
 	return 0;
 }
@@ -1239,9 +1243,9 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 	if (!governor)
 		return;
 
-	down(&cpufreq_governor_sem);
+	mutex_lock(&cpufreq_governor_mutex);
 	list_del(&governor->governor_list);
-	up(&cpufreq_governor_sem);
+	mutex_unlock(&cpufreq_governor_mutex);
 	return;
 }
 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
@@ -1268,9 +1272,9 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
 	if (!cpu_policy)
 		return -EINVAL;
 
-	down(&cpu_policy->lock);
+	mutex_lock(&cpu_policy->lock);
 	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
-	up(&cpu_policy->lock);
+	mutex_unlock(&cpu_policy->lock);
 
 	cpufreq_cpu_put(cpu_policy);
 
@@ -1382,7 +1386,7 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 		return -EINVAL;
 
 	/* lock this CPU */
-	down(&data->lock);
+	mutex_lock(&data->lock);
 
 	ret = __cpufreq_set_policy(data, policy);
 	data->user_policy.min = data->min;
@@ -1390,7 +1394,7 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 	data->user_policy.policy = data->policy;
 	data->user_policy.governor = data->governor;
 
-	up(&data->lock);
+	mutex_unlock(&data->lock);
 	cpufreq_cpu_put(data);
 
 	return ret;
@@ -1414,7 +1418,7 @@ int cpufreq_update_policy(unsigned int cpu)
 	if (!data)
 		return -ENODEV;
 
-	down(&data->lock);
+	mutex_lock(&data->lock);
 
 	dprintk("updating policy for CPU %u\n", cpu);
 	memcpy(&policy,
@@ -1425,9 +1429,17 @@ int cpufreq_update_policy(unsigned int cpu)
 	policy.policy = data->user_policy.policy;
 	policy.governor = data->user_policy.governor;
 
+	/* BIOS might change freq behind our back
+	  -> ask driver for current freq and notify governors about a change */
+	if (cpufreq_driver->get) {
+		policy.cur = cpufreq_driver->get(cpu);
+		if (data->cur != policy.cur)
+			cpufreq_out_of_sync(cpu, data->cur, policy.cur);
+	}
+
 	ret = __cpufreq_set_policy(data, &policy);
 
-	up(&data->lock);
+	mutex_unlock(&data->lock);
 
 	cpufreq_cpu_put(data);
 	return ret;
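All of the cpufreq core hunks above follow one mechanical pattern: a semaphore that was only ever used as a sleeping lock becomes a mutex, so DECLARE_MUTEX/init_MUTEX_LOCKED turn into DEFINE_MUTEX/mutex_init and each down()/up() pair turns into mutex_lock()/mutex_unlock(). A minimal sketch of the resulting idiom, using invented names rather than quoting the tree:

#include <linux/list.h>
#include <linux/mutex.h>

/* hypothetical list guarded the way cpufreq_governor_list now is */
static LIST_HEAD(example_list);
static DEFINE_MUTEX(example_mutex);     /* was: static DECLARE_MUTEX(example_sem); */

static void example_add(struct list_head *entry)
{
        mutex_lock(&example_mutex);     /* was: down(&example_sem); */
        list_add(entry, &example_list);
        mutex_unlock(&example_mutex);   /* was: up(&example_sem); */
}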
@@ -28,7 +28,7 @@
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
 #include <linux/percpu.h>
-
+#include <linux/mutex.h>
 /*
  * dbs is used in this file as a shortform for demandbased switching
  * It helps to keep variable names smaller, simpler
@@ -71,7 +71,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
-static DECLARE_MUTEX (dbs_sem);
+static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
 
 struct dbs_tuners {
@@ -139,9 +139,9 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	if (ret != 1 )
 		return -EINVAL;
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.sampling_down_factor = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -153,14 +153,14 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.sampling_rate = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -172,16 +172,16 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
 			input < MIN_FREQUENCY_UP_THRESHOLD ||
 			input <= dbs_tuners_ins.down_threshold) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.up_threshold = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -193,16 +193,16 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
 			input < MIN_FREQUENCY_DOWN_THRESHOLD ||
 			input >= dbs_tuners_ins.up_threshold) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.down_threshold = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -222,9 +222,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	if ( input > 1 )
 		input = 1;
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return count;
 	}
 	dbs_tuners_ins.ignore_nice = input;
@@ -236,7 +236,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
 		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
 	}
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -257,9 +257,9 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy,
 
 	/* no need to test here if freq_step is zero as the user might actually
 	 * want this, they would be crazy though :) */
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.freq_step = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -444,12 +444,12 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 {
 	int i;
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(void)
@@ -487,7 +487,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (this_dbs_info->enable) /* Already enabled */
 			break;
 
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		for_each_cpu_mask(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -521,11 +521,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			dbs_timer_init();
 		}
 
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		this_dbs_info->enable = 0;
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
@@ -536,12 +536,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (dbs_enable == 0)
 			dbs_timer_exit();
 
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
@@ -550,7 +550,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		break;
 	}
 	return 0;
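The conservative-governor hunks above (and the identical ondemand ones below) take dbs_mutex instead of dbs_sem around every sysfs tunable store handler and around the per-CPU walk in do_dbs_timer(). A hedged sketch of that store pattern with invented names; the real handlers additionally range-check the input as shown in the diff:

#include <linux/cpufreq.h>
#include <linux/kernel.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_tuners_mutex);      /* stand-in for dbs_mutex */
static unsigned int example_sampling_rate;

static ssize_t store_example_rate(struct cpufreq_policy *unused,
                                  const char *buf, size_t count)
{
        unsigned int input;
        int ret = sscanf(buf, "%u", &input);

        mutex_lock(&example_tuners_mutex);
        if (ret != 1) {
                /* every early exit must drop the lock it took */
                mutex_unlock(&example_tuners_mutex);
                return -EINVAL;
        }
        example_sampling_rate = input;
        mutex_unlock(&example_tuners_mutex);

        return count;
}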
@@ -27,6 +27,7 @@
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
 #include <linux/percpu.h>
+#include <linux/mutex.h>
 
 /*
  * dbs is used in this file as a shortform for demandbased switching
@@ -70,7 +71,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
-static DECLARE_MUTEX (dbs_sem);
+static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
 
 struct dbs_tuners {
@@ -136,9 +137,9 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	if (input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.sampling_down_factor = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -150,14 +151,14 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.sampling_rate = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -169,15 +170,15 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
 			input < MIN_FREQUENCY_UP_THRESHOLD) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.up_threshold = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -197,9 +198,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	if ( input > 1 )
 		input = 1;
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return count;
 	}
 	dbs_tuners_ins.ignore_nice = input;
@@ -211,7 +212,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
 		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
 	}
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -356,12 +357,12 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 {
 	int i;
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(void)
@@ -399,7 +400,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (this_dbs_info->enable) /* Already enabled */
 			break;
 
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		for_each_cpu_mask(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -435,11 +436,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			dbs_timer_init();
 		}
 
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		this_dbs_info->enable = 0;
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
@@ -450,12 +451,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (dbs_enable == 0)
 			dbs_timer_exit();
 
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
@@ -464,7 +465,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		break;
 	}
 	return 0;
@@ -1,3 +1,4 @@
+
 /*
  *  linux/drivers/cpufreq/cpufreq_userspace.c
  *
@@ -21,6 +22,7 @@
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/sysfs.h>
+#include <linux/mutex.h>
 
 #include <asm/uaccess.h>
 
@@ -33,9 +35,8 @@ static unsigned int cpu_min_freq[NR_CPUS];
 static unsigned int cpu_cur_freq[NR_CPUS]; /* current CPU freq */
 static unsigned int cpu_set_freq[NR_CPUS]; /* CPU freq desired by userspace */
 static unsigned int cpu_is_managed[NR_CPUS];
-static struct cpufreq_policy current_policy[NR_CPUS];
 
-static DECLARE_MUTEX (userspace_sem);
+static DEFINE_MUTEX (userspace_mutex);
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
 
@@ -64,35 +65,34 @@ static struct notifier_block userspace_cpufreq_notifier_block = {
  *
  * Sets the CPU frequency to freq.
  */
-static int cpufreq_set(unsigned int freq, unsigned int cpu)
+static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
 {
 	int ret = -EINVAL;
 
-	dprintk("cpufreq_set for cpu %u, freq %u kHz\n", cpu, freq);
+	dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
 
-	down(&userspace_sem);
-	if (!cpu_is_managed[cpu])
+	mutex_lock(&userspace_mutex);
+	if (!cpu_is_managed[policy->cpu])
 		goto err;
 
-	cpu_set_freq[cpu] = freq;
+	cpu_set_freq[policy->cpu] = freq;
 
-	if (freq < cpu_min_freq[cpu])
-		freq = cpu_min_freq[cpu];
-	if (freq > cpu_max_freq[cpu])
-		freq = cpu_max_freq[cpu];
+	if (freq < cpu_min_freq[policy->cpu])
+		freq = cpu_min_freq[policy->cpu];
+	if (freq > cpu_max_freq[policy->cpu])
+		freq = cpu_max_freq[policy->cpu];
 
 	/*
 	 * We're safe from concurrent calls to ->target() here
-	 * as we hold the userspace_sem lock. If we were calling
+	 * as we hold the userspace_mutex lock. If we were calling
 	 * cpufreq_driver_target, a deadlock situation might occur:
-	 * A: cpufreq_set (lock userspace_sem) -> cpufreq_driver_target(lock policy->lock)
-	 * B: cpufreq_set_policy(lock policy->lock) -> __cpufreq_governor -> cpufreq_governor_userspace (lock userspace_sem)
+	 * A: cpufreq_set (lock userspace_mutex) -> cpufreq_driver_target(lock policy->lock)
+	 * B: cpufreq_set_policy(lock policy->lock) -> __cpufreq_governor -> cpufreq_governor_userspace (lock userspace_mutex)
 	 */
-	ret = __cpufreq_driver_target(&current_policy[cpu], freq,
-				      CPUFREQ_RELATION_L);
+	ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
 
  err:
-	up(&userspace_sem);
+	mutex_unlock(&userspace_mutex);
 	return ret;
 }
 
@@ -113,7 +113,7 @@ store_speed (struct cpufreq_policy *policy, const char *buf, size_t count)
 	if (ret != 1)
 		return -EINVAL;
 
-	cpufreq_set(freq, policy->cpu);
+	cpufreq_set(freq, policy);
 
 	return count;
 }
@@ -134,44 +134,48 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
 		if (!cpu_online(cpu))
 			return -EINVAL;
 		BUG_ON(!policy->cur);
-		down(&userspace_sem);
+		mutex_lock(&userspace_mutex);
 		cpu_is_managed[cpu] = 1;
 		cpu_min_freq[cpu] = policy->min;
 		cpu_max_freq[cpu] = policy->max;
 		cpu_cur_freq[cpu] = policy->cur;
 		cpu_set_freq[cpu] = policy->cur;
 		sysfs_create_file (&policy->kobj, &freq_attr_scaling_setspeed.attr);
-		memcpy (&current_policy[cpu], policy, sizeof(struct cpufreq_policy));
 		dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]);
-		up(&userspace_sem);
+		mutex_unlock(&userspace_mutex);
 		break;
 	case CPUFREQ_GOV_STOP:
-		down(&userspace_sem);
+		mutex_lock(&userspace_mutex);
 		cpu_is_managed[cpu] = 0;
 		cpu_min_freq[cpu] = 0;
 		cpu_max_freq[cpu] = 0;
 		cpu_set_freq[cpu] = 0;
 		sysfs_remove_file (&policy->kobj, &freq_attr_scaling_setspeed.attr);
 		dprintk("managing cpu %u stopped\n", cpu);
-		up(&userspace_sem);
+		mutex_unlock(&userspace_mutex);
 		break;
 	case CPUFREQ_GOV_LIMITS:
-		down(&userspace_sem);
+		mutex_lock(&userspace_mutex);
+		dprintk("limit event for cpu %u: %u - %u kHz,"
+			"currently %u kHz, last set to %u kHz\n",
+			cpu, policy->min, policy->max,
+			cpu_cur_freq[cpu], cpu_set_freq[cpu]);
+		if (policy->max < cpu_set_freq[cpu]) {
+			__cpufreq_driver_target(policy, policy->max,
+						CPUFREQ_RELATION_H);
+		}
+		else if (policy->min > cpu_set_freq[cpu]) {
+			__cpufreq_driver_target(policy, policy->min,
						CPUFREQ_RELATION_L);
+		}
+		else {
+			__cpufreq_driver_target(policy, cpu_set_freq[cpu],
+						CPUFREQ_RELATION_L);
+		}
 		cpu_min_freq[cpu] = policy->min;
 		cpu_max_freq[cpu] = policy->max;
-		dprintk("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu], cpu_set_freq[cpu]);
-		if (policy->max < cpu_set_freq[cpu]) {
-			__cpufreq_driver_target(&current_policy[cpu], policy->max,
-						CPUFREQ_RELATION_H);
-		} else if (policy->min > cpu_set_freq[cpu]) {
-			__cpufreq_driver_target(&current_policy[cpu], policy->min,
-						CPUFREQ_RELATION_L);
-		} else {
-			__cpufreq_driver_target(&current_policy[cpu], cpu_set_freq[cpu],
-						CPUFREQ_RELATION_L);
-		}
-		memcpy (&current_policy[cpu], policy, sizeof(struct cpufreq_policy));
-		up(&userspace_sem);
+		cpu_cur_freq[cpu] = policy->cur;
+		mutex_unlock(&userspace_mutex);
 		break;
 	}
 	return 0;
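In the userspace governor above, cpufreq_set() now receives the struct cpufreq_policy pointer itself and targets it directly under userspace_mutex, so the private current_policy[] copy can go away. A small sketch of a caller following the new signature; all names here are hypothetical, and the stub only stands in for the real static cpufreq_set(), which takes userspace_mutex and calls __cpufreq_driver_target():

#include <linux/cpufreq.h>
#include <linux/kernel.h>

/* stub standing in for the static cpufreq_set() shown in the hunk above */
static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
{
        pr_debug("would set cpu %u to %u kHz\n", policy->cpu, freq);
        return 0;
}

static ssize_t example_store_speed(struct cpufreq_policy *policy,
                                   const char *buf, size_t count)
{
        unsigned int freq = 0;

        if (sscanf(buf, "%u", &freq) != 1)
                return -EINVAL;

        cpufreq_set(freq, policy);      /* was: cpufreq_set(freq, policy->cpu) */
        return count;
}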
@@ -14,6 +14,7 @@
 #ifndef _LINUX_CPUFREQ_H
 #define _LINUX_CPUFREQ_H
 
+#include <linux/mutex.h>
 #include <linux/config.h>
 #include <linux/notifier.h>
 #include <linux/threads.h>
@@ -82,7 +83,7 @@ struct cpufreq_policy {
 	unsigned int		policy; /* see above */
 	struct cpufreq_governor	*governor; /* see below */
 
-	struct semaphore	lock;   /* CPU ->setpolicy or ->target may
+	struct mutex		lock;   /* CPU ->setpolicy or ->target may
 					   only be called once a time */
 
 	struct work_struct	update; /* if update_policy() needs to be
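The header change above is what forces the cpufreq_add_dev() hunk earlier in this diff to split initialization in two: a struct mutex has no equivalent of init_MUTEX_LOCKED(), so the lock is initialized and then taken explicitly. A minimal sketch, assuming a hypothetical structure with the same shape:

#include <linux/mutex.h>

struct example_policy {
        struct mutex lock;      /* was: struct semaphore lock */
};

static void example_policy_init(struct example_policy *p)
{
        mutex_init(&p->lock);   /* replaces init_MUTEX_LOCKED(&p->lock) */
        mutex_lock(&p->lock);   /* hold it until registration finishes */
}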