x86/CPU: Move cpu_detect_cache_sizes() into init_intel_cacheinfo()
There is no point in having the conditional cpu_detect_cache_sizes() call
at the callsite of init_intel_cacheinfo(). Move it into
init_intel_cacheinfo() and make init_intel_cacheinfo() void.

[ tglx: Made init_intel_cacheinfo() void as the return value was
  pointless. Adjusted changelog accordingly ]

Signed-off-by: David Wang <davidwang@zhaoxin.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: lukelin@viacpu.com
Cc: qiyuanwang@zhaoxin.com
Cc: gregkh@linuxfoundation.org
Cc: brucechang@via-alliance.com
Cc: timguo@zhaoxin.com
Cc: cooperyan@zhaoxin.com
Cc: hpa@zytor.com
Cc: benjaminpan@viatech.com
Link: https://lkml.kernel.org/r/1525314766-18910-3-git-send-email-davidwang@zhaoxin.com
parent 2cc61be60e
commit 807e9bc8e2
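The net effect is easiest to see outside the diff: init_intel_cacheinfo() now performs the legacy cache-size fallback itself, so init_intel() simply calls it and no longer checks a return value. Below is a minimal standalone sketch of that pattern, not kernel code; the struct layout, the stubbed cpu_detect_cache_sizes(), and the cache-size values are placeholders chosen only for illustration.

/*
 * Standalone model (userspace, for illustration only) of the pattern this
 * commit introduces: the CPUID(4)-style probe falls back to the legacy
 * detection internally, so the caller needs no "l2" bookkeeping.
 */
#include <stdio.h>

struct cpuinfo_x86 {
	unsigned int x86_cache_size;
};

/* Legacy detection, stubbed out: pretend the legacy leaf reported 512 KB. */
static void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	c->x86_cache_size = 512;
}

/* After the patch: void, with the fallback here instead of in the caller. */
static void init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Pretend the deterministic probe found L1 only, no L2/L3. */
	unsigned int l1i = 32, l1d = 32, l2 = 0, l3 = 0;

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i + l1d));

	if (!l2)
		cpu_detect_cache_sizes(c);
}

int main(void)
{
	struct cpuinfo_x86 c = { 0 };

	/* The caller shrinks to a single call, as init_intel() does now. */
	init_intel_cacheinfo(&c);
	printf("x86_cache_size = %u KB\n", c.x86_cache_size);
	return 0;
}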
@@ -691,7 +691,7 @@ void init_amd_cacheinfo(struct cpuinfo_x86 *c)
 	}
 }
 
-unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
+void init_intel_cacheinfo(struct cpuinfo_x86 *c)
 {
 	/* Cache sizes */
 	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
@@ -843,7 +843,8 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
 
 	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 
-	return l2;
+	if (!l2)
+		cpu_detect_cache_sizes(c);
 }
 
 static int __cache_amd_cpumap_setup(unsigned int cpu, int index,

@@ -51,7 +51,7 @@ extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
 extern u32 get_scattered_cpuid_leaf(unsigned int level,
 				    unsigned int sub_leaf,
 				    enum cpuid_regs_idx reg);
-extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
+extern void init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
 
 extern int detect_num_cpu_cores(struct cpuinfo_x86 *c);

@@ -635,8 +635,6 @@ static void init_intel_misc_features(struct cpuinfo_x86 *c)
 
 static void init_intel(struct cpuinfo_x86 *c)
 {
-	unsigned int l2 = 0;
-
 	early_init_intel(c);
 
 	intel_workarounds(c);
@@ -659,13 +657,7 @@ static void init_intel(struct cpuinfo_x86 *c)
 #endif
 	}
 
-	l2 = init_intel_cacheinfo(c);
-
-	/* Detect legacy cache sizes if init_intel_cacheinfo did not */
-	if (l2 == 0) {
-		cpu_detect_cache_sizes(c);
-		l2 = c->x86_cache_size;
-	}
+	init_intel_cacheinfo(c);
 
 	if (c->cpuid_level > 9) {
 		unsigned eax = cpuid_eax(10);
@@ -678,7 +670,8 @@ static void init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
 
 	if (boot_cpu_has(X86_FEATURE_DS)) {
-		unsigned int l1;
+		unsigned int l1, l2;
+
 		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
 		if (!(l1 & (1<<11)))
 			set_cpu_cap(c, X86_FEATURE_BTS);
@@ -706,6 +699,7 @@ static void init_intel(struct cpuinfo_x86 *c)
 	 * Dixon is NOT a Celeron.
 	 */
 	if (c->x86 == 6) {
+		unsigned int l2 = c->x86_cache_size;
 		char *p = NULL;
 
 		switch (c->x86_model) {