Merge remote branch 'origin/x86/cpu' into x86/amd-nb
commit 86ffb08519
@@ -152,10 +152,14 @@
#define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
#define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */
#define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */
#define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */
#define X86_FEATURE_XOP (6*32+11) /* extended AVX instructions */
#define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */
#define X86_FEATURE_LWP (6*32+15) /* Light Weight Profiling */
#define X86_FEATURE_FMA4 (6*32+16) /* 4 operands MAC instructions */
#define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
#define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */
#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */

/*
 * Auxiliary flags: Linux defined - For features scattered in various
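The word-6 constants above encode (word * 32 + bit): word 6 is the AMD-defined CPUID 0x80000001 ECX word, so X86_FEATURE_XOP is bit 11, X86_FEATURE_TOPOEXT bit 22, and so on; the hunk retires the never-shipped SSE5 name in favour of XOP and adds the Bulldozer-era LWP/FMA4/TBM/TOPOEXT bits at the positions shown. A minimal stand-alone sketch of how such a constant indexes a capability bitmap (the array and helper below are stand-ins, not kernel code):

/* Sketch: how a cpu_has()-style test uses the (word*32 + bit) encoding.
 * x86_capability[] here is a stand-in for cpuinfo_x86.x86_capability. */
#include <stdio.h>

#define X86_FEATURE_XOP     (6*32 + 11)
#define X86_FEATURE_TOPOEXT (6*32 + 22)
#define NCAPINTS 10	/* 32-bit capability words (words 0-9 after this change) */

static unsigned int x86_capability[NCAPINTS];

static int cpu_has_bit(int feature)
{
	return (x86_capability[feature / 32] >> (feature % 32)) & 1;
}

int main(void)
{
	x86_capability[6] |= 1u << 11;	/* pretend CPUID 0x80000001 ECX set XOP */

	printf("XOP     : %d\n", cpu_has_bit(X86_FEATURE_XOP));
	printf("TOPOEXT : %d\n", cpu_has_bit(X86_FEATURE_TOPOEXT));
	return 0;
}
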
@@ -179,6 +183,13 @@
#define X86_FEATURE_LBRV (8*32+ 6) /* AMD LBR Virtualization support */
#define X86_FEATURE_SVML (8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
#define X86_FEATURE_NRIPS (8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
#define X86_FEATURE_TSCRATEMSR (8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
#define X86_FEATURE_VMCBCLEAN (8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
#define X86_FEATURE_FLUSHBYASID (8*32+11) /* AMD flush-by-ASID support */
#define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
#define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */

/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
@@ -412,6 +412,23 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	}
#endif

	/* We need to do the following only once */
	if (c != &boot_cpu_data)
		return;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				printk(KERN_WARNING FW_BUG "TSC doesn't count "
					"with P0 frequency!\n");
		}
	}
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
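The block added to early_init_amd() runs only once (for boot_cpu_data) and, on constant-TSC parts that are family 0x10 model 2 or newer, reads MSR_K7_HWCR (0xc0010015) and warns when bit 24 is clear, i.e. when the TSC is not counting at the P0 frequency. A rough user-space equivalent of that check via the msr driver (assumes the msr module is loaded and root privileges; not kernel code):

/* Sketch: read MSR 0xc0010015 (MSR_K7_HWCR) through /dev/cpu/0/msr and test
 * bit 24, the same condition the kernel warns about above. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	uint64_t hwcr;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	/* the msr device uses the MSR address as the file offset */
	if (pread(fd, &hwcr, sizeof(hwcr), 0xc0010015) != sizeof(hwcr)) {
		perror("pread");
		return 1;
	}
	close(fd);

	if (!(hwcr & (1ULL << 24)))
		printf("TSC doesn't count with P0 frequency!\n");
	else
		printf("bit 24 set: TSC counts at P0 frequency\n");
	return 0;
}
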
@@ -523,7 +540,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
#endif

	if (c->extended_cpuid_level >= 0x80000006) {
		if ((c->x86 >= 0x0f) && (cpuid_edx(0x80000006) & 0xf000))
		if (cpuid_edx(0x80000006) & 0xf000)
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
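The changed line drops the family >= 0x0f test and keeps only the CPUID 0x80000006 EDX check: EDX bits 15:12 hold the L3 associativity field, so a non-zero value under the 0xf000 mask means an L3 cache is present and four cache leaves are reported instead of three. A user-space sketch of the same test:

/* Sketch: check for an L3 cache the way the hunk above does. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000006, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("num_cache_leaves would be %d\n", (edx & 0xf000) ? 4 : 3);
	return 0;
}
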
@@ -43,6 +43,12 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
		{ X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 },
		{ X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 },
		{ X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a, 0 },
		{ X86_FEATURE_TSCRATEMSR, CR_EDX, 4, 0x8000000a, 0 },
		{ X86_FEATURE_VMCBCLEAN, CR_EDX, 5, 0x8000000a, 0 },
		{ X86_FEATURE_FLUSHBYASID, CR_EDX, 6, 0x8000000a, 0 },
		{ X86_FEATURE_DECODEASSISTS, CR_EDX, 7, 0x8000000a, 0 },
		{ X86_FEATURE_PAUSEFILTER, CR_EDX,10, 0x8000000a, 0 },
		{ X86_FEATURE_PFTHRESHOLD, CR_EDX,12, 0x8000000a, 0 },
		{ 0, 0, 0, 0, 0 }
	};

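Each new cpuid_bits[] entry tells init_scattered_cpuid_features() to set the named word-8 software flag when the given EDX bit of CPUID leaf 0x8000000a (the AMD SVM feature leaf) is set; the quoted names in the cpufeature.h comments above ("tsc_scale", "vmcb_clean", ...) are the strings that end up in /proc/cpuinfo. A user-space sketch that decodes the same EDX bits, using the bit positions from the table:

/* Sketch: report the AMD SVM sub-features from CPUID 0x8000000a EDX. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x8000000a, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("lbrv          : %u\n", (edx >> 1) & 1);
	printf("svm_lock      : %u\n", (edx >> 2) & 1);
	printf("nrip_save     : %u\n", (edx >> 3) & 1);
	printf("tsc_scale     : %u\n", (edx >> 4) & 1);
	printf("vmcb_clean    : %u\n", (edx >> 5) & 1);
	printf("flushbyasid   : %u\n", (edx >> 6) & 1);
	printf("decodeassists : %u\n", (edx >> 7) & 1);
	printf("pausefilter   : %u\n", (edx >> 10) & 1);
	printf("pfthreshold   : %u\n", (edx >> 12) & 1);
	return 0;
}
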
@@ -892,60 +892,6 @@ static void __init init_tsc_clocksource(void)
	clocksource_register_khz(&clocksource_tsc, tsc_khz);
}

#ifdef CONFIG_X86_64
/*
 * calibrate_cpu is used on systems with fixed rate TSCs to determine
 * processor frequency
 */
#define TICK_COUNT 100000000
static unsigned long __init calibrate_cpu(void)
{
	int tsc_start, tsc_now;
	int i, no_ctr_free;
	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
	unsigned long flags;

	for (i = 0; i < 4; i++)
		if (avail_to_resrv_perfctr_nmi_bit(i))
			break;
	no_ctr_free = (i == 4);
	if (no_ctr_free) {
		WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
		     "cpu_khz value may be incorrect.\n");
		i = 3;
		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		rdmsrl(MSR_K7_PERFCTR3, pmc3);
	} else {
		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
	local_irq_save(flags);
	/* start measuring cycles, incrementing from 0 */
	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
	rdtscl(tsc_start);
	do {
		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
		tsc_now = get_cycles();
	} while ((tsc_now - tsc_start) < TICK_COUNT);

	local_irq_restore(flags);
	if (no_ctr_free) {
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		wrmsrl(MSR_K7_PERFCTR3, pmc3);
		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
	} else {
		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}

	return pmc_now * tsc_khz / (tsc_now - tsc_start);
}
#else
static inline unsigned long calibrate_cpu(void) { return cpu_khz; }
#endif

void __init tsc_init(void)
{
	u64 lpj;
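The calibrate_cpu() routine removed here programmed a free K7 performance counter with event 0x76 (CPU clocks not halted), enabled by bit 22 and counting in both user and kernel mode via 3 << 16, spun until the TSC had advanced by TICK_COUNT, and then scaled the result: cpu_khz = pmc_delta * tsc_khz / tsc_delta. With the firmware check added in amd.c above, this recalibration is no longer needed on constant-TSC CPUs. A rough user-space analogue of the measurement, with perf_event_open() standing in for the raw MSR_K7_* accesses and CLOCK_MONOTONIC for the TSC (an illustration under those assumptions, not the kernel's method):

/* Sketch: estimate core frequency by counting CPU cycles over a fixed wall
 * clock interval, analogous to the removed calibrate_cpu(). */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	struct timespec t0, t1;
	long long elapsed_ns;
	uint64_t cycles;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* cycles event, ~0x76 on AMD */
	attr.disabled = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* count this thread */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	clock_gettime(CLOCK_MONOTONIC, &t0);

	/* spin for ~100 ms, like the TICK_COUNT loop in calibrate_cpu() */
	do {
		clock_gettime(CLOCK_MONOTONIC, &t1);
		elapsed_ns = (t1.tv_sec - t0.tv_sec) * 1000000000LL +
			     (t1.tv_nsec - t0.tv_nsec);
	} while (elapsed_ns < 100000000LL);

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &cycles, sizeof(cycles)) != sizeof(cycles)) {
		perror("read");
		return 1;
	}
	close(fd);

	/* cycles per nanosecond, times 10^6, gives kHz (same unit as cpu_khz) */
	printf("~%llu kHz\n",
	       (unsigned long long)(cycles * 1000000ULL / elapsed_ns));
	return 0;
}
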
@@ -964,10 +910,6 @@ void __init tsc_init(void)
		return;
	}

	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
			(boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
		cpu_khz = calibrate_cpu();

	printk("Detected %lu.%03lu MHz processor.\n",
			(unsigned long)cpu_khz / 1000,
			(unsigned long)cpu_khz % 1000);
@@ -1991,13 +1991,14 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
		0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
		0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX);
		0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C);
	/* cpuid 0x80000001.ecx */
	const u32 kvm_supported_word6_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
		0 /* SKINIT */ | 0 /* WDT */;
		F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();
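In do_cpuid_ent(), F(X) expands to a single-bit mask for feature X within its 32-bit CPUID word, and the kvm_supported_word*_x86_features constants are ANDed against the host's CPUID output to decide what a guest may see; the change advertises XOP/FMA4/TBM (and AES/F16C in word 4) in place of the dropped SSE5 bit. A simplified stand-alone sketch of that masking (the F() body below is modelled on the word*32+bit encoding, not copied from x86.c):

/* Sketch: F()-style per-word masking of host CPUID bits. */
#include <stdio.h>
#include <cpuid.h>

#define X86_FEATURE_XOP  (6*32 + 11)
#define X86_FEATURE_FMA4 (6*32 + 16)
#define X86_FEATURE_TBM  (6*32 + 21)
#define F(x) (1u << (X86_FEATURE_##x & 31))	/* bit within the 32-bit word */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int word6_mask = F(XOP) | F(FMA4) | F(TBM);

	if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
		return 1;

	/* what a guest would be allowed to see of these three bits */
	printf("host 0x80000001 ecx & mask = %#x\n", ecx & word6_mask);
	return 0;
}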