[CPUFREQ][5/8] acpi-cpufreq: lindent acpi-cpufreq.c
Lindent acpi-cpufreq.c, with additional changes replacing "return (..)" with "return ..". No functional changes in this patch.

Signed-off-by: Denis Sadykov <denis.m.sadykov@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
parent 83d0515bbb
commit 64be7eedb2
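For illustration only, a minimal C sketch of the two style changes this patch applies (the function name example_check below is hypothetical, not taken from the driver): multi-line prototypes are collapsed to the kernel's usual one-line form, and parentheses around return values are dropped.

/* Hypothetical example -- not code from acpi-cpufreq.c.
 * Before the cleanup a definition might read:
 *
 *	static int
 *	example_check (
 *		int value)
 *	{
 *		if (value < 0)
 *			return (-1);
 *		return (0);
 *	}
 *
 * After Lindent plus the "return (..)" -> "return .." change:
 */
static int example_check(int value)
{
	if (value < 0)
		return -1;	/* no parentheses around the return value */
	return 0;
}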
@@ -51,7 +51,6 @@ MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
 MODULE_DESCRIPTION("ACPI Processor P-States Driver");
 MODULE_LICENSE("GPL");
 
-
 enum {
 	UNDEFINED_CAPABLE = 0,
 	SYSTEM_INTEL_MSR_CAPABLE,
@@ -74,7 +73,6 @@ static struct cpufreq_driver acpi_cpufreq_driver;
 
 static unsigned int acpi_pstate_strict;
 
-
 static int check_est_cpu(unsigned int cpuid)
 {
 	struct cpuinfo_x86 *cpu = &cpu_data[cpuid];
@@ -86,7 +84,6 @@ static int check_est_cpu(unsigned int cpuid)
 	return 1;
 }
 
-
 static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
 {
 	struct acpi_processor_performance *perf;
@@ -101,7 +98,6 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
 	return 0;
 }
 
-
 static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
 {
 	int i;
@@ -114,7 +110,6 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
 	return data->freq_table[0].frequency;
 }
 
-
 static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
 {
 	switch (data->cpu_feature) {
@@ -268,8 +263,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 	dprintk("get_cur_freq_on_cpu (%d)\n", cpu);
 
 	if (unlikely(data == NULL ||
-		     data->acpi_data == NULL ||
-		     data->freq_table == NULL)) {
+		     data->acpi_data == NULL || data->freq_table == NULL)) {
 		return 0;
 	}
 
@@ -295,8 +289,7 @@ static unsigned int check_freqs(cpumask_t mask, unsigned int freq,
 }
 
 static int acpi_cpufreq_target(struct cpufreq_policy *policy,
-			       unsigned int target_freq,
-			       unsigned int relation)
+			       unsigned int target_freq, unsigned int relation)
 {
 	struct acpi_cpufreq_data *data = drv_data[policy->cpu];
 	struct acpi_processor_performance *perf;
@@ -312,8 +305,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
 
 	if (unlikely(data == NULL ||
-		     data->acpi_data == NULL ||
-		     data->freq_table == NULL)) {
+		     data->acpi_data == NULL || data->freq_table == NULL)) {
 		return -ENODEV;
 	}
 
@@ -321,8 +313,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	result = cpufreq_frequency_table_target(policy,
 						data->freq_table,
 						target_freq,
-						relation,
-						&next_state);
+						relation, &next_state);
 	if (unlikely(result))
 		return -ENODEV;
 
@@ -339,10 +330,12 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	next_perf_state = data->freq_table[next_state].index;
 	if (freqs.new == freqs.old) {
 		if (unlikely(data->resume)) {
-			dprintk("Called after resume, resetting to P%d\n", next_perf_state);
+			dprintk("Called after resume, resetting to P%d\n",
+				next_perf_state);
 			data->resume = 0;
 		} else {
-			dprintk("Already at target state (P%d)\n", next_perf_state);
+			dprintk("Already at target state (P%d)\n",
+				next_perf_state);
 			return 0;
 		}
 	}
@@ -351,7 +344,9 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	case SYSTEM_INTEL_MSR_CAPABLE:
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
 		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
-		msr = (u32) perf->states[next_perf_state].control & INTEL_MSR_RANGE;
+		msr =
+		    (u32) perf->states[next_perf_state].
+		    control & INTEL_MSR_RANGE;
 		cmd.val = (cmd.val & ~INTEL_MSR_RANGE) | msr;
 		break;
 	case SYSTEM_IO_CAPABLE:
@@ -395,10 +390,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	return result;
 }
 
-
-static int
-acpi_cpufreq_verify (
-	struct cpufreq_policy *policy)
+static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
 {
 	struct acpi_cpufreq_data *data = drv_data[policy->cpu];
 
@@ -407,11 +399,8 @@ acpi_cpufreq_verify (
 	return cpufreq_frequency_table_verify(policy, data->freq_table);
 }
 
-
 static unsigned long
-acpi_cpufreq_guess_freq (
-	struct acpi_cpufreq_data *data,
-	unsigned int cpu)
+acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
 {
 	struct acpi_processor_performance *perf = data->acpi_data;
 
@@ -426,11 +415,11 @@ acpi_cpufreq_guess_freq (
 			freqn = perf->states[i + 1].core_frequency * 1000;
 			if ((2 * cpu_khz) > (freqn + freq)) {
 				perf->state = i;
-				return (freq);
+				return freq;
 			}
 		}
 		perf->state = perf->state_count - 1;
-		return (freqn);
+		return freqn;
 	} else {
 		/* assume CPU is at P0... */
 		perf->state = 0;
@@ -438,7 +427,6 @@ acpi_cpufreq_guess_freq (
 	}
 }
 
-
 /*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
@@ -463,7 +451,7 @@ static int acpi_cpufreq_early_init(void)
 				kfree(acpi_perf_data[j]);
 				acpi_perf_data[j] = NULL;
 			}
-			return (-ENOMEM);
+			return -ENOMEM;
 		}
 		acpi_perf_data[i] = data;
 		cpu_set(i, covered);
@@ -501,9 +489,7 @@ static struct dmi_system_id sw_any_bug_dmi_table[] = {
 	{ }
 };
 
-static int
-acpi_cpufreq_cpu_init (
-	struct cpufreq_policy *policy)
+static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
 	unsigned int i;
 	unsigned int valid_states = 0;
@@ -517,11 +503,11 @@ acpi_cpufreq_cpu_init (
 	dprintk("acpi_cpufreq_cpu_init\n");
 
 	if (!acpi_perf_data[cpu])
-		return (-ENODEV);
+		return -ENODEV;
 
 	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
 	if (!data)
-		return (-ENOMEM);
+		return -ENOMEM;
 
 	data->acpi_data = acpi_perf_data[cpu];
 	drv_data[cpu] = data;
@@ -585,7 +571,9 @@ acpi_cpufreq_cpu_init (
 		goto err_unreg;
 	}
 
-	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (perf->state_count + 1), GFP_KERNEL);
+	data->freq_table =
+	    kmalloc(sizeof(struct cpufreq_frequency_table) *
+		    (perf->state_count + 1), GFP_KERNEL);
 	if (!data->freq_table) {
 		result = -ENOMEM;
 		goto err_unreg;
@@ -594,14 +582,15 @@ acpi_cpufreq_cpu_init (
 	/* detect transition latency */
 	policy->cpuinfo.transition_latency = 0;
 	for (i = 0; i < perf->state_count; i++) {
-		if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
-			policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000;
+		if ((perf->states[i].transition_latency * 1000) >
+		    policy->cpuinfo.transition_latency)
+			policy->cpuinfo.transition_latency =
+			    perf->states[i].transition_latency * 1000;
 	}
 	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
 
 	/* table init */
-	for (i=0; i<perf->state_count; i++)
-	{
+	for (i = 0; i < perf->state_count; i++) {
 		if (i > 0 && perf->states[i].core_frequency ==
 		    perf->states[i - 1].core_frequency)
 			continue;
@@ -659,44 +648,37 @@ acpi_cpufreq_cpu_init (
 	kfree(data);
 	drv_data[cpu] = NULL;
 
-	return (result);
+	return result;
 }
 
-
-static int
-acpi_cpufreq_cpu_exit (
-	struct cpufreq_policy *policy)
+static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
 	struct acpi_cpufreq_data *data = drv_data[policy->cpu];
 
-
 	dprintk("acpi_cpufreq_cpu_exit\n");
 
 	if (data) {
 		cpufreq_frequency_table_put_attr(policy->cpu);
 		drv_data[policy->cpu] = NULL;
-		acpi_processor_unregister_performance(data->acpi_data, policy->cpu);
+		acpi_processor_unregister_performance(data->acpi_data,
+						      policy->cpu);
 		kfree(data);
 	}
 
-	return (0);
+	return 0;
 }
 
-static int
-acpi_cpufreq_resume (
-	struct cpufreq_policy *policy)
+static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 {
 	struct acpi_cpufreq_data *data = drv_data[policy->cpu];
 
-
 	dprintk("acpi_cpufreq_resume\n");
 
 	data->resume = 1;
 
-	return (0);
+	return 0;
 }
 
-
 static struct freq_attr *acpi_cpufreq_attr[] = {
 	&cpufreq_freq_attr_scaling_available_freqs,
 	NULL,
@@ -714,9 +696,7 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
 	.attr = acpi_cpufreq_attr,
 };
 
-
-static int __init
-acpi_cpufreq_init (void)
+static int __init acpi_cpufreq_init(void)
 {
 	dprintk("acpi_cpufreq_init\n");
 
@@ -725,9 +705,7 @@ acpi_cpufreq_init (void)
 	return cpufreq_register_driver(&acpi_cpufreq_driver);
 }
 
-
-static void __exit
-acpi_cpufreq_exit (void)
+static void __exit acpi_cpufreq_exit(void)
 {
 	unsigned int i;
 	dprintk("acpi_cpufreq_exit\n");
@@ -742,7 +720,8 @@ acpi_cpufreq_exit (void)
 }
 
 module_param(acpi_pstate_strict, uint, 0644);
-MODULE_PARM_DESC(acpi_pstate_strict, "value 0 or non-zero. non-zero -> strict ACPI checks are performed during frequency changes.");
+MODULE_PARM_DESC(acpi_pstate_strict,
+		 "value 0 or non-zero. non-zero -> strict ACPI checks are performed during frequency changes.");
 
 late_initcall(acpi_cpufreq_init);
 module_exit(acpi_cpufreq_exit);