x86, cpufreq: Add APERF/MPERF support for AMD processors

Starting with model 10 of Family 0x10, AMD processors may have
support for APERF/MPERF.  Add support for identifying it and using
it within cpufreq.  Move the APERF/MPERF functions out of the
acpi-cpufreq code and into their own file so they can easily be
shared.

Signed-off-by: Mark Langsdorf <mark.langsdorf@amd.com>
LKML-Reference: <20100401141956.GA1930@aftab>
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
Reviewed-by: Thomas Renninger <trenn@suse.de>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Author:     Mark Langsdorf <mark.langsdorf@amd.com>
AuthorDate: 2010-03-18 18:41:46 +01:00
Committer:  H. Peter Anvin <hpa@zytor.com>
Commit:     a2fed573f0 (parent d65ad45cd8)

 5 files changed, 72 insertions(+), 44 deletions(-)
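The patch below wires cpufreq's getavg hook to a shared helper that derives the average active (C0) frequency from the APERF/MPERF counter deltas: avg_freq = max_freq * delta(APERF) / delta(MPERF). As background only (this is not part of the patch), here is a minimal userspace sketch of the same calculation, assuming root access and the msr driver exposing /dev/cpu/0/msr; the helper read_msr_cpu0(), the one-second sample window, and the command-line max-frequency argument are inventions for this example, and floating point stands in for the kernel's fixed-point APERFMPERF_SHIFT arithmetic.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define MSR_IA32_MPERF	0xe7	/* counts at the maximum (P0) frequency while in C0 */
#define MSR_IA32_APERF	0xe8	/* counts at the actual delivered frequency while in C0 */

/* Read a 64-bit MSR of CPU 0 through the msr character device. */
static uint64_t read_msr_cpu0(int fd, uint32_t msr)
{
	uint64_t val;

	if (pread(fd, &val, sizeof(val), msr) != sizeof(val)) {
		perror("pread");
		exit(EXIT_FAILURE);
	}
	return val;
}

int main(int argc, char **argv)
{
	uint64_t aperf0, mperf0, aperf1, mperf1;
	unsigned long max_freq_khz;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <max_freq_khz>\n", argv[0]);
		return EXIT_FAILURE;
	}
	max_freq_khz = strtoul(argv[1], NULL, 0);

	fd = open("/dev/cpu/0/msr", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return EXIT_FAILURE;
	}

	/* Snapshot both counters, sample for one second, snapshot again. */
	aperf0 = read_msr_cpu0(fd, MSR_IA32_APERF);
	mperf0 = read_msr_cpu0(fd, MSR_IA32_MPERF);
	sleep(1);
	aperf1 = read_msr_cpu0(fd, MSR_IA32_APERF);
	mperf1 = read_msr_cpu0(fd, MSR_IA32_MPERF);
	close(fd);

	if (mperf1 == mperf0) {
		fprintf(stderr, "CPU0 spent no measurable time in C0\n");
		return EXIT_FAILURE;
	}

	/* Only the ratio of the deltas is architecturally meaningful. */
	printf("average C0 frequency: %.0f kHz\n",
	       (double)max_freq_khz * (aperf1 - aperf0) / (mperf1 - mperf0));
	return 0;
}

A possible invocation, assuming the msr module is loaded: gcc -O2 -o aperfmperf aperfmperf.c && sudo ./aperfmperf 2600000, where 2600000 is the CPU's maximum frequency in kHz (e.g. cpuinfo_max_freq from sysfs).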

--- a/arch/x86/kernel/cpu/cpufreq/Makefile
+++ b/arch/x86/kernel/cpu/cpufreq/Makefile
@@ -2,8 +2,8 @@
 # K8 systems. ACPI is preferred to all other hardware-specific drivers.
 # speedstep-* is preferred over p4-clockmod.
 
-obj-$(CONFIG_X86_POWERNOW_K8)		+= powernow-k8.o
-obj-$(CONFIG_X86_ACPI_CPUFREQ)		+= acpi-cpufreq.o
+obj-$(CONFIG_X86_POWERNOW_K8)		+= powernow-k8.o mperf.o
+obj-$(CONFIG_X86_ACPI_CPUFREQ)		+= acpi-cpufreq.o mperf.o
 obj-$(CONFIG_X86_PCC_CPUFREQ)		+= pcc-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K6)		+= powernow-k6.o
 obj-$(CONFIG_X86_POWERNOW_K7)		+= powernow-k7.o

--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -45,6 +45,7 @@
 #include <asm/msr.h>
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
+#include "mperf.h"
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
 		"acpi-cpufreq", msg)
@@ -70,8 +71,6 @@ struct acpi_cpufreq_data {
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
 
-static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
-
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance *acpi_perf_data;
 
@@ -239,45 +238,6 @@ static u32 get_cur_val(const struct cpumask *mask)
 	return cmd.val;
 }
 
-/* Called via smp_call_function_single(), on the target CPU */
-static void read_measured_perf_ctrs(void *_cur)
-{
-	struct aperfmperf *am = _cur;
-
-	get_aperfmperf(am);
-}
-
-/*
- * Return the measured active (C0) frequency on this CPU since last call
- * to this function.
- * Input: cpu number
- * Return: Average CPU frequency in terms of max frequency (zero on error)
- *
- * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
- * over a period of time, while CPU is in C0 state.
- * IA32_MPERF counts at the rate of max advertised frequency
- * IA32_APERF counts at the rate of actual CPU frequency
- * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
- * no meaning should be associated with absolute values of these MSRs.
- */
-static unsigned int get_measured_perf(struct cpufreq_policy *policy,
-				      unsigned int cpu)
-{
-	struct aperfmperf perf;
-	unsigned long ratio;
-	unsigned int retval;
-
-	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
-		return 0;
-
-	ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
-	per_cpu(acfreq_old_perf, cpu) = perf;
-
-	retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
-
-	return retval;
-}
-
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
 	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
@@ -701,7 +661,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 	/* Check for APERF/MPERF support in hardware */
 	if (cpu_has(c, X86_FEATURE_APERFMPERF))
-		acpi_cpufreq_driver.getavg = get_measured_perf;
+		acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
 
 	dprintk("CPU%u - ACPI performance management activated.\n", cpu);
 	for (i = 0; i < perf->state_count; i++)

--- /dev/null
+++ b/arch/x86/kernel/cpu/cpufreq/mperf.c
@@ -0,0 +1,51 @@
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
+
+#include "mperf.h"
+
+static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
+
+/* Called via smp_call_function_single(), on the target CPU */
+static void read_measured_perf_ctrs(void *_cur)
+{
+	struct aperfmperf *am = _cur;
+
+	get_aperfmperf(am);
+}
+
+/*
+ * Return the measured active (C0) frequency on this CPU since last call
+ * to this function.
+ * Input: cpu number
+ * Return: Average CPU frequency in terms of max frequency (zero on error)
+ *
+ * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
+ * over a period of time, while CPU is in C0 state.
+ * IA32_MPERF counts at the rate of max advertised frequency
+ * IA32_APERF counts at the rate of actual CPU frequency
+ * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
+ * no meaning should be associated with absolute values of these MSRs.
+ */
+unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
+				       unsigned int cpu)
+{
+	struct aperfmperf perf;
+	unsigned long ratio;
+	unsigned int retval;
+
+	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
+		return 0;
+
+	ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
+	per_cpu(acfreq_old_perf, cpu) = perf;
+
+	retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf);
+MODULE_LICENSE("GPL");

--- /dev/null
+++ b/arch/x86/kernel/cpu/cpufreq/mperf.h
@@ -0,0 +1,9 @@
+/*
+ *  (c) 2010 Advanced Micro Devices, Inc.
+ *  Your use of this code is subject to the terms and conditions of the
+ *  GNU general public license version 2. See "COPYING" or
+ *  http://www.gnu.org/licenses/gpl.html
+ */
+
+unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
+					unsigned int cpu);

--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -45,6 +45,7 @@
 #define PFX "powernow-k8: "
 #define VERSION "version 2.20.00"
 #include "powernow-k8.h"
+#include "mperf.h"
 
 /* serialize freq changes  */
 static DEFINE_MUTEX(fidvid_mutex);
@@ -57,6 +58,8 @@ static int cpu_family = CPU_OPTERON;
 static bool cpb_capable, cpb_enabled;
 static struct msr __percpu *msrs;
 
+static struct cpufreq_driver cpufreq_amd64_driver;
+
 #ifndef CONFIG_SMP
 static inline const struct cpumask *cpu_core_mask(int cpu)
 {
@@ -1251,6 +1254,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	struct powernow_k8_data *data;
 	struct init_on_cpu init_on_cpu;
 	int rc;
+	struct cpuinfo_x86 *c = &cpu_data(pol->cpu);
 
 	if (!cpu_online(pol->cpu))
 		return -ENODEV;
@@ -1325,6 +1329,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 		return -EINVAL;
 	}
 
+	/* Check for APERF/MPERF support in hardware */
+	if (cpu_has(c, X86_FEATURE_APERFMPERF))
+		cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;
+
 	cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
 
 	if (cpu_family == CPU_HW_PSTATE)