x86/cpu: Clean up various files a bit
No code changes except printk levels (although some of the K6 MTRR code might be clearer if there were a few, as would splitting out some of the Intel cache code).

Signed-off-by: Alan Cox <alan@linux.intel.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 8bdbd962ec
parent e90476d3ba
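The bulk of the patch adds explicit log levels to bare printk() calls: a printk() without a KERN_* prefix is logged at the default level, while KERN_CONT marks a message as the continuation of the previous, unterminated line. A minimal sketch of the convention as it is applied below (hypothetical helper, not part of the patch):

    #include <linux/kernel.h>

    static void report_l1(unsigned int l1i, unsigned int l1d)
    {
            /* Start a message at INFO level, without a trailing newline... */
            printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);
            /* ...and finish the same line with KERN_CONT. */
            if (l1d)
                    printk(KERN_CONT ", L1 D cache: %dK\n", l1d);
            else
                    printk(KERN_CONT "\n");
    }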
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -2,7 +2,7 @@
 #include <linux/bitops.h>
 #include <linux/mm.h>
 
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
@@ -45,8 +45,8 @@ static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
 #define CBAR_ENB	(0x80000000)
 #define CBAR_KEY	(0X000000CB)
 	if (c->x86_model == 9 || c->x86_model == 10) {
-		if (inl (CBAR) & CBAR_ENB)
-			outl (0 | CBAR_KEY, CBAR);
+		if (inl(CBAR) & CBAR_ENB)
+			outl(0 | CBAR_KEY, CBAR);
 	}
 }
 
@@ -87,9 +87,10 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
 		d = d2-d;
 
 		if (d > 20*K6_BUG_LOOP)
-			printk("system stability may be impaired when more than 32 MB are used.\n");
+			printk(KERN_CONT
+			       "system stability may be impaired when more than 32 MB are used.\n");
 		else
-			printk("probably OK (after B9730xxxx).\n");
+			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
 		printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
 	}
 
@@ -219,8 +220,9 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
 	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
 		rdmsr(MSR_K7_CLK_CTL, l, h);
 		if ((l & 0xfff00000) != 0x20000000) {
-			printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
-				((l & 0x000fffff)|0x20000000));
+			printk(KERN_INFO
+			    "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
+					l, ((l & 0x000fffff)|0x20000000));
 			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
 		}
 	}
@@ -398,7 +400,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		u32 level;
 
 		level = cpuid_eax(1);
-		if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
+		if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
 			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 	}
 	if (c->x86 == 0x10 || c->x86 == 0x11)
@@ -487,27 +489,30 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	 * benefit in doing so.
 	 */
 	if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
 		printk(KERN_DEBUG "tseg: %010llx\n", tseg);
 		if ((tseg>>PMD_SHIFT) <
 				(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
 			((tseg>>PMD_SHIFT) <
 				(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
 			(tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
 			set_memory_4k((unsigned long)__va(tseg), 1);
 	}
 }
 #endif
 }
 
 #ifdef CONFIG_X86_32
-static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
+static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
+							unsigned int size)
 {
 	/* AMD errata T13 (order #21922) */
 	if ((c->x86 == 6)) {
-		if (c->x86_model == 3 && c->x86_mask == 0)	/* Duron Rev A0 */
+		/* Duron Rev A0 */
+		if (c->x86_model == 3 && c->x86_mask == 0)
 			size = 64;
+		/* Tbird rev A1/A2 */
 		if (c->x86_model == 4 &&
-			(c->x86_mask == 0 || c->x86_mask == 1))	/* Tbird rev A1/A2 */
+			(c->x86_mask == 0 || c->x86_mask == 1))
 			size = 256;
 	}
 	return size;
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -81,7 +81,7 @@ static void __init check_fpu(void)
 
 	boot_cpu_data.fdiv_bug = fdiv_bug;
 	if (boot_cpu_data.fdiv_bug)
-		printk("Hmm, FPU with FDIV bug.\n");
+		printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n");
 }
 
 static void __init check_hlt(void)
@@ -98,7 +98,7 @@ static void __init check_hlt(void)
 	halt();
 	halt();
 	halt();
-	printk("OK.\n");
+	printk(KERN_CONT "OK.\n");
 }
 
 /*
@@ -122,9 +122,9 @@ static void __init check_popad(void)
 	 * CPU hard. Too bad.
 	 */
 	if (res != 12345678)
-		printk("Buggy.\n");
+		printk(KERN_CONT "Buggy.\n");
 	else
-		printk("OK.\n");
+		printk(KERN_CONT "OK.\n");
 #endif
 }
 
@@ -156,7 +156,7 @@ void __init check_bugs(void)
 {
 	identify_boot_cpu();
 #ifndef CONFIG_SMP
-	printk("CPU: ");
+	printk(KERN_INFO "CPU: ");
 	print_cpu_info(&boot_cpu_data);
 #endif
 	check_config();
--- a/arch/x86/kernel/cpu/bugs_64.c
+++ b/arch/x86/kernel/cpu/bugs_64.c
@@ -15,7 +15,7 @@ void __init check_bugs(void)
 {
 	identify_boot_cpu();
 #if !defined(CONFIG_SMP)
-	printk("CPU: ");
+	printk(KERN_INFO "CPU: ");
 	print_cpu_info(&boot_cpu_data);
 #endif
 	alternative_instructions();
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -18,8 +18,8 @@
 #include <asm/hypervisor.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
-#include <asm/topology.h>
-#include <asm/cpumask.h>
+#include <linux/topology.h>
+#include <linux/cpumask.h>
 #include <asm/pgtable.h>
 #include <asm/atomic.h>
 #include <asm/proto.h>
@@ -28,13 +28,13 @@
 #include <asm/desc.h>
 #include <asm/i387.h>
 #include <asm/mtrr.h>
-#include <asm/numa.h>
+#include <linux/numa.h>
 #include <asm/asm.h>
 #include <asm/cpu.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
 #include <asm/pat.h>
-#include <asm/smp.h>
+#include <linux/smp.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/uv/uv.h>
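The include swaps in this and the following files apply the rule that code should include the generic <linux/foo.h> wrapper when one exists, rather than the architecture header it wraps, since the wrapper pulls in <asm/foo.h> itself along with any generic declarations. A sketch of the preference, using smp.h (one of the headers touched above):

    /* Preferred: the generic wrapper, which includes <asm/smp.h> itself */
    #include <linux/smp.h>

    /* Discouraged: reaching for the architecture header directly */
    /* #include <asm/smp.h> */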
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -3,10 +3,10 @@
 #include <linux/delay.h>
 #include <linux/pci.h>
 #include <asm/dma.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/processor-cyrix.h>
 #include <asm/processor-flags.h>
-#include <asm/timer.h>
+#include <linux/timer.h>
 #include <asm/pci-direct.h>
 #include <asm/tsc.h>
 
@@ -282,7 +282,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 		 * The 5510/5520 companion chips have a funky PIT.
 		 */
 		if (vendor == PCI_VENDOR_ID_CYRIX &&
-	 (device == PCI_DEVICE_ID_CYRIX_5510 || device == PCI_DEVICE_ID_CYRIX_5520))
+			(device == PCI_DEVICE_ID_CYRIX_5510 ||
+				device == PCI_DEVICE_ID_CYRIX_5520))
 			mark_tsc_unstable("cyrix 5510/5520 detected");
 	}
 #endif
@@ -299,7 +300,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 		 *  ?  : 0x7x
 		 * GX1 : 0x8x          GX1  datasheet 56
 		 */
-		if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f))
+		if ((0x30 <= dir1 && dir1 <= 0x6f) ||
+				(0x80 <= dir1 && dir1 <= 0x8f))
 			geode_configure();
 		return;
 	} else { /* MediaGX */
@@ -427,9 +429,12 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
 			printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
 			local_irq_save(flags);
 			ccr3 = getCx86(CX86_CCR3);
-			setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN  */
-			setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); /* enable cpuid  */
-			setCx86(CX86_CCR3, ccr3);			/* disable MAPEN */
+			/* enable MAPEN  */
+			setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
+			/* enable cpuid  */
+			setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80);
+			/* disable MAPEN */
+			setCx86(CX86_CCR3, ccr3);
 			local_irq_restore(flags);
 		}
 	}
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -28,11 +28,10 @@
 static inline void __cpuinit
 detect_hypervisor_vendor(struct cpuinfo_x86 *c)
 {
-	if (vmware_platform()) {
+	if (vmware_platform())
 		c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE;
-	} else {
+	else
 		c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE;
-	}
 }
 
 unsigned long get_hypervisor_tsc_freq(void)
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -7,17 +7,17 @@
 #include <linux/sched.h>
 #include <linux/thread_info.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 
 #include <asm/processor.h>
 #include <asm/pgtable.h>
 #include <asm/msr.h>
-#include <asm/uaccess.h>
 #include <asm/ds.h>
 #include <asm/bugs.h>
 #include <asm/cpu.h>
 
 #ifdef CONFIG_X86_64
-#include <asm/topology.h>
+#include <linux/topology.h>
 #include <asm/numa_64.h>
 #endif
 
@@ -174,7 +174,8 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_F00F_BUG
 	/*
 	 * All current models of Pentium and Pentium with MMX technology CPUs
-	 * have the F0 0F bug, which lets nonprivileged users lock up the system.
+	 * have the F0 0F bug, which lets nonprivileged users lock up the
+	 * system.
 	 * Note that the workaround only should be initialized once...
 	 */
 	c->f00f_bug = 0;
@@ -207,7 +208,7 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 		printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
 		printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
 		lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
-		wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
+		wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
 	}
 }
 
@@ -283,7 +284,7 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
 	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
 	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
 	if (eax & 0x1f)
-		return ((eax >> 26) + 1);
+		return (eax >> 26) + 1;
 	else
 		return 1;
 }
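The Errata 037 workaround above is a standard MSR read-modify-write; the rdmsr() sits a few lines above the quoted context. A condensed sketch of the pattern (same MSR and flag as the hunk):

    unsigned int lo, hi;

    rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);          /* read both 32-bit halves */
    lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;  /* set the disable bit */
    wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);          /* write the value back */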
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -3,7 +3,7 @@
  *
  *	Changes:
  *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
- *		Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
+ *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
  *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
  */
 
@@ -16,7 +16,7 @@
 #include <linux/pci.h>
 
 #include <asm/processor.h>
-#include <asm/smp.h>
+#include <linux/smp.h>
 #include <asm/k8.h>
 
 #define LVL_1_INST	1
@@ -25,14 +25,15 @@
 #define LVL_3		4
 #define LVL_TRACE	5
 
-struct _cache_table
-{
+struct _cache_table {
 	unsigned char descriptor;
 	char cache_type;
 	short size;
 };
 
-/* all the cache descriptor types we care about (no TLB or trace cache entries) */
+/* All the cache descriptor types we care about (no TLB or
+   trace cache entries) */
+
 static const struct _cache_table __cpuinitconst cache_table[] =
 {
 	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
@@ -105,8 +106,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
 };
 
 
-enum _cache_type
-{
+enum _cache_type {
 	CACHE_TYPE_NULL	= 0,
 	CACHE_TYPE_DATA = 1,
 	CACHE_TYPE_INST = 2,
@@ -170,31 +170,31 @@ unsigned short			num_cache_leaves;
    Maybe later */
 union l1_cache {
 	struct {
-		unsigned line_size : 8;
-		unsigned lines_per_tag : 8;
-		unsigned assoc : 8;
-		unsigned size_in_kb : 8;
+		unsigned line_size:8;
+		unsigned lines_per_tag:8;
+		unsigned assoc:8;
+		unsigned size_in_kb:8;
 	};
 	unsigned val;
 };
 
 union l2_cache {
 	struct {
-		unsigned line_size : 8;
-		unsigned lines_per_tag : 4;
-		unsigned assoc : 4;
-		unsigned size_in_kb : 16;
+		unsigned line_size:8;
+		unsigned lines_per_tag:4;
+		unsigned assoc:4;
+		unsigned size_in_kb:16;
 	};
 	unsigned val;
 };
 
 union l3_cache {
 	struct {
-		unsigned line_size : 8;
-		unsigned lines_per_tag : 4;
-		unsigned assoc : 4;
-		unsigned res : 2;
-		unsigned size_encoded : 14;
+		unsigned line_size:8;
+		unsigned lines_per_tag:4;
+		unsigned assoc:4;
+		unsigned res:2;
+		unsigned size_encoded:14;
 	};
 	unsigned val;
 };
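The bitfield cleanup above (checkpatch prefers `type name:width;` with no spaces) does not change layout: each union still overlays named fields on one raw CPUID register value. A user-space sketch of how the l2_cache union decodes a register, using a hypothetical sample value rather than a real CPUID read:

    #include <stdio.h>

    union l2_cache {
            struct {
                    unsigned line_size:8;
                    unsigned lines_per_tag:4;
                    unsigned assoc:4;
                    unsigned size_in_kb:16;
            };
            unsigned val;
    };

    int main(void)
    {
            /* Hypothetical ECX from CPUID 0x80000006: 512K, 64-byte lines */
            union l2_cache l2 = { .val = 0x02006140 };

            printf("L2: %uK, assoc field 0x%x, %u-byte lines\n",
                   l2.size_in_kb, l2.assoc, l2.line_size);
            return 0;
    }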
@@ -350,7 +350,8 @@ static int __cpuinit find_num_cache_leaves(void)
 
 unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 {
-	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
+	/* Cache sizes */
+	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
 	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
 	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
 	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
@@ -377,8 +378,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 
 			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
 			if (retval >= 0) {
-				switch(this_leaf.eax.split.level) {
-				    case 1:
+				switch (this_leaf.eax.split.level) {
+				case 1:
 					if (this_leaf.eax.split.type ==
 							CACHE_TYPE_DATA)
 						new_l1d = this_leaf.size/1024;
@@ -386,19 +387,20 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 							CACHE_TYPE_INST)
 						new_l1i = this_leaf.size/1024;
 					break;
-				    case 2:
+				case 2:
 					new_l2 = this_leaf.size/1024;
 					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
 					index_msb = get_count_order(num_threads_sharing);
 					l2_id = c->apicid >> index_msb;
 					break;
-				    case 3:
+				case 3:
 					new_l3 = this_leaf.size/1024;
 					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
-					index_msb = get_count_order(num_threads_sharing);
+					index_msb = get_count_order(
+							num_threads_sharing);
 					l3_id = c->apicid >> index_msb;
 					break;
-				    default:
+				default:
 					break;
 				}
 			}
@@ -421,22 +423,21 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 		/* Number of times to iterate */
 		n = cpuid_eax(2) & 0xFF;
 
-		for ( i = 0 ; i < n ; i++ ) {
+		for (i = 0 ; i < n ; i++) {
 			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
 
 			/* If bit 31 is set, this is an unknown format */
-			for ( j = 0 ; j < 3 ; j++ ) {
-				if (regs[j] & (1 << 31)) regs[j] = 0;
-			}
+			for (j = 0 ; j < 3 ; j++)
+				if (regs[j] & (1 << 31))
+					regs[j] = 0;
 
 			/* Byte 0 is level count, not a descriptor */
-			for ( j = 1 ; j < 16 ; j++ ) {
+			for (j = 1 ; j < 16 ; j++) {
 				unsigned char des = dp[j];
 				unsigned char k = 0;
 
 				/* look up this descriptor in the table */
-				while (cache_table[k].descriptor != 0)
-				{
+				while (cache_table[k].descriptor != 0) {
 					if (cache_table[k].descriptor == des) {
 						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
 							break;
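The reflowed loop above walks CPUID leaf 2: each of EAX-EDX packs four descriptor bytes, a set bit 31 means the register carries no descriptors, and byte 0 of EAX is the iteration count rather than a descriptor. A minimal user-space version of the same walk (it prints raw descriptor bytes instead of looking them up in cache_table):

    #include <stdio.h>
    #include <cpuid.h>      /* GCC's __get_cpuid() */

    int main(void)
    {
            unsigned int regs[4], i, j, n;
            unsigned char *dp = (unsigned char *)regs;

            if (!__get_cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]))
                    return 1;

            n = regs[0] & 0xFF;     /* number of times to iterate */
            for (i = 0; i < n; i++) {
                    __get_cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

                    /* If bit 31 is set, the register holds no descriptors */
                    for (j = 0; j < 4; j++)
                            if (regs[j] & (1u << 31))
                                    regs[j] = 0;

                    /* Byte 0 is the level count, not a descriptor */
                    for (j = 1; j < 16; j++)
                            if (dp[j])
                                    printf("descriptor 0x%02x\n", dp[j]);
            }
            return 0;
    }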
@@ -488,14 +489,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	}
 
 	if (trace)
-		printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
-	else if ( l1i )
-		printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
+		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
+	else if (l1i)
+		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);
 
 	if (l1d)
-		printk(", L1 D cache: %dK\n", l1d);
+		printk(KERN_CONT ", L1 D cache: %dK\n", l1d);
 	else
-		printk("\n");
+		printk(KERN_CONT "\n");
 
 	if (l2)
 		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
@@ -558,8 +559,13 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 	}
 }
 #else
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
-static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
+static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+{
+}
+
+static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+{
+}
 #endif
 
 static void __cpuinit free_cache_attributes(unsigned int cpu)
@@ -645,7 +651,7 @@ static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
 static ssize_t show_##file_name						\
 (struct _cpuid4_info *this_leaf, char *buf)				\
 {									\
-	return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \
+	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
 }
 
 show_one_plus(level, eax.split.level, 0);
@@ -656,7 +662,7 @@ show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
 
 static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
 {
-	return sprintf (buf, "%luK\n", this_leaf->size / 1024);
+	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
 }
 
 static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
@@ -669,7 +675,7 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
 		const struct cpumask *mask;
 
 		mask = to_cpumask(this_leaf->shared_cpu_map);
-		n = type?
+		n = type ?
 			cpulist_scnprintf(buf, len-2, mask) :
 			cpumask_scnprintf(buf, len-2, mask);
 		buf[n++] = '\n';
@@ -800,7 +806,7 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
 static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
 		show_cache_disable_1, store_cache_disable_1);
 
-static struct attribute * default_attrs[] = {
+static struct attribute *default_attrs[] = {
 	&type.attr,
 	&level.attr,
 	&coherency_line_size.attr,
@@ -815,7 +821,7 @@ static struct attribute * default_attrs[] = {
 	NULL
 };
 
-static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
+static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 {
 	struct _cache_attr *fattr = to_attr(attr);
 	struct _index_kobject *this_leaf = to_object(kobj);
@@ -828,8 +834,8 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
 	return ret;
 }
 
-static ssize_t store(struct kobject * kobj, struct attribute * attr,
-		     const char * buf, size_t count)
+static ssize_t store(struct kobject *kobj, struct attribute *attr,
+		     const char *buf, size_t count)
 {
 	struct _cache_attr *fattr = to_attr(attr);
 	struct _index_kobject *this_leaf = to_object(kobj);
@@ -883,7 +889,7 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
 		goto err_out;
 
 	per_cpu(index_kobject, cpu) = kzalloc(
-	    sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
+	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
 	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
 		goto err_out;
 
@@ -917,7 +923,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 	}
 
 	for (i = 0; i < num_cache_leaves; i++) {
-		this_object = INDEX_KOBJECT_PTR(cpu,i);
+		this_object = INDEX_KOBJECT_PTR(cpu, i);
 		this_object->cpu = cpu;
 		this_object->index = i;
 		retval = kobject_init_and_add(&(this_object->kobj),
@@ -925,9 +931,8 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 					      per_cpu(cache_kobject, cpu),
 					      "index%1lu", i);
 		if (unlikely(retval)) {
-			for (j = 0; j < i; j++) {
-				kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
-			}
+			for (j = 0; j < i; j++)
+				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
 			kobject_put(per_cpu(cache_kobject, cpu));
 			cpuid4_cache_sysfs_exit(cpu);
 			return retval;
@@ -952,7 +957,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
 
 	for (i = 0; i < num_cache_leaves; i++)
-		kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
+		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
 	kobject_put(per_cpu(cache_kobject, cpu));
 	cpuid4_cache_sysfs_exit(cpu);
 }
@@ -977,8 +982,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
-{
+static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
 	.notifier_call = cacheinfo_cpu_callback,
 };
 
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -68,16 +68,16 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
 	/* returns the bit offset of the performance counter register */
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_AMD:
-		return (msr - MSR_K7_PERFCTR0);
+		return msr - MSR_K7_PERFCTR0;
 	case X86_VENDOR_INTEL:
 		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
-			return (msr - MSR_ARCH_PERFMON_PERFCTR0);
+			return msr - MSR_ARCH_PERFMON_PERFCTR0;
 
 		switch (boot_cpu_data.x86) {
 		case 6:
-			return (msr - MSR_P6_PERFCTR0);
+			return msr - MSR_P6_PERFCTR0;
 		case 15:
-			return (msr - MSR_P4_BPU_PERFCTR0);
+			return msr - MSR_P4_BPU_PERFCTR0;
 		}
 	}
 	return 0;
@@ -92,16 +92,16 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
 	/* returns the bit offset of the event selection register */
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_AMD:
-		return (msr - MSR_K7_EVNTSEL0);
+		return msr - MSR_K7_EVNTSEL0;
 	case X86_VENDOR_INTEL:
 		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
-			return (msr - MSR_ARCH_PERFMON_EVENTSEL0);
+			return msr - MSR_ARCH_PERFMON_EVENTSEL0;
 
 		switch (boot_cpu_data.x86) {
 		case 6:
-			return (msr - MSR_P6_EVNTSEL0);
+			return msr - MSR_P6_EVNTSEL0;
 		case 15:
-			return (msr - MSR_P4_BSU_ESCR0);
+			return msr - MSR_P4_BSU_ESCR0;
 		}
 	}
 	return 0;
@@ -113,7 +113,7 @@ int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
 {
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	return (!test_bit(counter, perfctr_nmi_owner));
+	return !test_bit(counter, perfctr_nmi_owner);
 }
 
 /* checks the an msr for availability */
@@ -124,7 +124,7 @@ int avail_to_resrv_perfctr_nmi(unsigned int msr)
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	return (!test_bit(counter, perfctr_nmi_owner));
+	return !test_bit(counter, perfctr_nmi_owner);
 }
 EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
 
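Both avail_to_resrv_* helpers answer "is this counter free?" by testing a bit in the global perfctr_nmi_owner bitmap; the actual claiming is done elsewhere in this file with an atomic test-and-set. A condensed sketch of that reservation idiom (the real function is reserve_perfctr_nmi(); the name and shape here are simplified for illustration):

    /* Sketch only: claim ownership of the counter behind an MSR. */
    static int reserve_counter_bit(unsigned int msr)
    {
            unsigned int counter = nmi_perfctr_msr_to_bit(msr);

            BUG_ON(counter > NMI_MAX_COUNTER_BITS);

            /* Atomic test-and-set; non-zero means someone already owns it. */
            if (test_and_set_bit(counter, perfctr_nmi_owner))
                    return 0;       /* busy */
            return 1;               /* reserved */
    }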
@@ -237,7 +237,7 @@ static unsigned int adjust_for_32bit_ctr(unsigned int hz)
 	 */
 	counter_val = (u64)cpu_khz * 1000;
 	do_div(counter_val, retval);
- 	if (counter_val > 0x7fffffffULL) {
+	if (counter_val > 0x7fffffffULL) {
 		u64 count = (u64)cpu_khz * 1000;
 		do_div(count, 0x7fffffffUL);
 		retval = count + 1;
@@ -251,7 +251,7 @@ static void write_watchdog_counter(unsigned int perfctr_msr,
 	u64 count = (u64)cpu_khz * 1000;
 
 	do_div(count, nmi_hz);
-	if(descr)
+	if (descr)
 		pr_debug("setting %s to -0x%08Lx\n", descr, count);
 	wrmsrl(perfctr_msr, 0 - count);
 }
@@ -262,7 +262,7 @@ static void write_watchdog_counter32(unsigned int perfctr_msr,
 	u64 count = (u64)cpu_khz * 1000;
 
 	do_div(count, nmi_hz);
-	if(descr)
+	if (descr)
 		pr_debug("setting %s to -0x%08Lx\n", descr, count);
 	wrmsr(perfctr_msr, (u32)(-count), 0);
 }
@@ -296,7 +296,7 @@ static int setup_k7_watchdog(unsigned nmi_hz)
 
 	/* setup the timer */
 	wrmsr(evntsel_msr, evntsel, 0);
-	write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz);
+	write_watchdog_counter(perfctr_msr, "K7_PERFCTR0", nmi_hz);
 
 	/* initialize the wd struct before enabling */
 	wd->perfctr_msr = perfctr_msr;
@@ -387,7 +387,7 @@ static int setup_p6_watchdog(unsigned nmi_hz)
 	/* setup the timer */
 	wrmsr(evntsel_msr, evntsel, 0);
 	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
-	write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz);
+	write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0", nmi_hz);
 
 	/* initialize the wd struct before enabling */
 	wd->perfctr_msr = perfctr_msr;
@@ -415,7 +415,7 @@ static void __kprobes p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 
 	/* P6/ARCH_PERFMON has 32 bit counter write */
-	write_watchdog_counter32(wd->perfctr_msr, NULL,nmi_hz);
+	write_watchdog_counter32(wd->perfctr_msr, NULL, nmi_hz);
 }
 
 static const struct wd_ops p6_wd_ops = {
@@ -490,9 +490,9 @@ static int setup_p4_watchdog(unsigned nmi_hz)
 	if (smp_num_siblings == 2) {
 		unsigned int ebx, apicid;
 
-        	ebx = cpuid_ebx(1);
-	        apicid = (ebx >> 24) & 0xff;
-	        ht_num = apicid & 1;
+		ebx = cpuid_ebx(1);
+		apicid = (ebx >> 24) & 0xff;
+		ht_num = apicid & 1;
 	} else
 #endif
 		ht_num = 0;
@@ -544,7 +544,7 @@ static int setup_p4_watchdog(unsigned nmi_hz)
 	}
 
 	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
-	 	| P4_ESCR_OS
+		| P4_ESCR_OS
 		| P4_ESCR_USR;
 
 	cccr_val |= P4_CCCR_THRESHOLD(15)
@@ -612,7 +612,7 @@ static void __kprobes p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
 {
 	unsigned dummy;
 	/*
- 	 * P4 quirks:
+	 * P4 quirks:
 	 * - An overflown perfctr will assert its interrupt
 	 *   until the OVF flag in its CCCR is cleared.
 	 * - LVTPC is masked on interrupt and must be
@@ -662,7 +662,8 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
 	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
 	 */
 	cpuid(10, &(eax.full), &ebx, &unused, &unused);
-	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
+	if ((eax.split.mask_length <
+		(ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
 	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
 		return 0;
 
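write_watchdog_counter() above programs the performance counter with a negative value so that, counting up, it overflows after `count` events and raises the watchdog NMI roughly nmi_hz times per second. Worked numbers, assuming a 2 GHz CPU (cpu_khz = 2,000,000) and nmi_hz = 1:

    u64 count = (u64)cpu_khz * 1000;    /* 2,000,000 * 1000 = 2e9 cycles */

    do_div(count, nmi_hz);              /* nmi_hz = 1 -> count stays 2e9 */
    wrmsrl(perfctr_msr, 0 - count);     /* counter starts at -2e9 and
                                           overflows after 2e9 events:
                                           one NMI per second at 2 GHz */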
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -128,7 +128,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 			if (i < ARRAY_SIZE(x86_power_flags) &&
 			    x86_power_flags[i])
 				seq_printf(m, "%s%s",
-					   x86_power_flags[i][0]?" ":"",
+					   x86_power_flags[i][0] ? " " : "",
 					   x86_power_flags[i]);
 			else
 				seq_printf(m, " [%d]", i);
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -49,17 +49,17 @@ static inline int __vmware_platform(void)
 
 static unsigned long __vmware_get_tsc_khz(void)
 {
-        uint64_t tsc_hz;
-        uint32_t eax, ebx, ecx, edx;
+	uint64_t tsc_hz;
+	uint32_t eax, ebx, ecx, edx;
 
-        VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
+	VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
 
-        if (ebx == UINT_MAX)
-                return 0;
-        tsc_hz = eax | (((uint64_t)ebx) << 32);
-        do_div(tsc_hz, 1000);
-        BUG_ON(tsc_hz >> 32);
-        return tsc_hz;
+	if (ebx == UINT_MAX)
+		return 0;
+	tsc_hz = eax | (((uint64_t)ebx) << 32);
+	do_div(tsc_hz, 1000);
+	BUG_ON(tsc_hz >> 32);
+	return tsc_hz;
 }
 
 /*
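For reference, GETHZ returns the TSC frequency in Hz split across EAX (low 32 bits) and EBX (high 32 bits, UINT_MAX on failure), and the function converts it to kHz. Worked numbers for a hypothetical 2.4 GHz TSC:

    /* eax = 0x8F0D1800, ebx = 0 -> tsc_hz = 2,400,000,000 Hz */
    uint64_t tsc_hz = eax | (((uint64_t)ebx) << 32);

    do_div(tsc_hz, 1000);       /* 2,400,000 kHz */
    BUG_ON(tsc_hz >> 32);       /* the kHz value must fit in 32 bits */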