2008-10-23 13:26:29 +08:00
|
|
|
#ifndef _ASM_X86_MICROCODE_H
|
|
|
|
#define _ASM_X86_MICROCODE_H
|
2008-08-20 06:22:26 +08:00
|
|
|
|
2015-03-19 02:28:56 +08:00
|
|
|
#include <linux/earlycpio.h>
|
|
|
|
|
2013-12-04 19:31:31 +08:00
|
|
|
/*
 * Read an MSR via the native_* primitive (no paravirt indirection) and
 * split the 64-bit result into two 32-bit halves.  Multi-statement body
 * wrapped in do { } while (0) so it is safe in unbraced if/else.
 * @msr is evaluated once; @val1/@val2 receive low/high 32 bits.
 */
#define native_rdmsr(msr, val1, val2)			\
do {							\
	u64 __val = native_read_msr((msr));		\
	(void)((val1) = (u32)__val);			\
	(void)((val2) = (u32)(__val >> 32));		\
} while (0)
|
|
|
|
|
|
|
|
/* Write an MSR given as separate low/high 32-bit halves, bypassing paravirt. */
#define native_wrmsr(msr, low, high)		\
	native_write_msr(msr, low, high)
|
|
|
|
|
|
|
|
/* Write a single 64-bit value to an MSR, splitting it into u32 halves. */
#define native_wrmsrl(msr, val)			\
	native_write_msr((msr),			\
			 (u32)((u64)(val)),	\
			 (u32)((u64)(val) >> 32))
|
|
|
|
|
2008-09-23 18:08:44 +08:00
|
|
|
/*
 * Identification of a CPU, used to match a microcode patch against the
 * processor it is meant for.
 */
struct cpu_signature {
	unsigned int sig;	/* CPUID(1).EAX signature (family/model/stepping) */
	unsigned int pf;	/* presumably processor flags / platform ID — confirm in vendor driver */
	unsigned int rev;	/* currently loaded microcode revision */
};
|
2008-07-29 00:44:21 +08:00
|
|
|
|
2008-09-12 05:27:52 +08:00
|
|
|
struct device;
|
2008-08-20 06:22:26 +08:00
|
|
|
|
2009-05-12 05:48:27 +08:00
|
|
|
/* Outcome of a microcode load/request operation. */
enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };

/* True when the microcode loader is disabled; defined in the core loader. */
extern bool dis_ucode_ldr;
|
2009-05-12 05:48:27 +08:00
|
|
|
|
2008-07-29 00:44:20 +08:00
|
|
|
/*
 * Vendor-specific driver hooks.  One instance per supported vendor is
 * obtained via init_intel_microcode()/init_amd_microcode() below.
 */
struct microcode_ops {
	/* Load a microcode blob handed in from user space for @cpu. */
	enum ucode_state (*request_microcode_user) (int cpu,
				const void __user *buf, size_t size);

	/* Request a microcode image for @cpu through the firmware loader. */
	enum ucode_state (*request_microcode_fw) (int cpu, struct device *,
						  bool refresh_fw);

	/* Release per-CPU microcode state for @cpu. */
	void (*microcode_fini_cpu) (int cpu);

	/*
	 * The generic 'microcode_core' part guarantees that
	 * the callbacks below run on a target cpu when they
	 * are being called.
	 * See also the "Synchronization" section in microcode_core.c.
	 */
	int (*apply_microcode) (int cpu);
	int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
};
|
|
|
|
|
2008-08-20 06:22:26 +08:00
|
|
|
/* Per-CPU microcode bookkeeping, indexed by CPU number in ucode_cpu_info[]. */
struct ucode_cpu_info {
	struct cpu_signature cpu_sig;	/* signature of this CPU */
	int valid;			/* presumably non-zero once cpu_sig was collected — confirm in core */
	void *mc;			/* vendor-specific microcode image, NULL if none */
};
extern struct ucode_cpu_info ucode_cpu_info[];
|
|
|
|
|
2008-09-23 18:08:44 +08:00
|
|
|
#ifdef CONFIG_MICROCODE_INTEL
extern struct microcode_ops * __init init_intel_microcode(void);
#else
/* Intel microcode support compiled out: no ops available. */
static inline struct microcode_ops * __init init_intel_microcode(void)
{
	return NULL;
}
#endif /* CONFIG_MICROCODE_INTEL */
|
|
|
|
|
|
|
|
#ifdef CONFIG_MICROCODE_AMD
extern struct microcode_ops * __init init_amd_microcode(void);
extern void __exit exit_amd_microcode(void);
#else
/* AMD microcode support compiled out: no ops, teardown is a no-op. */
static inline struct microcode_ops * __init init_amd_microcode(void)
{
	return NULL;
}
static inline void __exit exit_amd_microcode(void) {}
#endif
|
|
|
|
|
2012-12-21 15:44:25 +08:00
|
|
|
#ifdef CONFIG_MICROCODE_EARLY
/* Upper bound on microcode patches the early loader keeps around. */
#define MAX_UCODE_COUNT 128

/* Pack four characters into a u32 in the byte order CPUID(0) returns them. */
#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')

/*
 * True when the CPUID(0) vendor registers spell the string a|b|c.
 * Note the register order: the 12-byte vendor string lives in
 * EBX, EDX, ECX — hence (ebx ^ a), (edx ^ b), (ecx ^ c).
 */
#define CPUID_IS(a, b, c, ebx, ecx, edx)	\
		(!((ebx ^ (a))|(edx ^ (b))|(ecx ^ (c))))
|
|
|
|
|
|
|
|
/*
 * In early loading microcode phase on BSP, boot_cpu_data is not set up yet.
 * x86_vendor() gets vendor id for BSP.
 *
 * In 32 bit AP case, accessing boot_cpu_data needs linear address. To simplify
 * coding, we still use x86_vendor() to get vendor id for AP.
 *
 * x86_vendor() gets vendor information directly from CPUID.
 */
static inline int x86_vendor(void)
{
	u32 eax = 0x00000000;	/* CPUID leaf 0: vendor string */
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	/* Vendor string "GenuineIntel" in EBX/EDX/ECX? */
	if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
		return X86_VENDOR_INTEL;

	/* Vendor string "AuthenticAMD"? */
	if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
		return X86_VENDOR_AMD;

	return X86_VENDOR_UNKNOWN;
}
|
|
|
|
|
|
|
|
/*
 * Extract the CPU family from a CPUID(1).EAX-style signature: the base
 * family lives in bits 11:8; when that field saturates at 0xf, the
 * extended family in bits 27:20 is added on top.
 */
static inline unsigned int __x86_family(unsigned int sig)
{
	unsigned int fam = (sig >> 8) & 0xf;

	if (fam != 0xf)
		return fam;

	return fam + ((sig >> 20) & 0xff);
}
|
|
|
|
|
|
|
|
/* Family of the CPU this code runs on, read directly via CPUID(1). */
static inline unsigned int x86_family(void)
{
	u32 eax = 0x00000001;	/* CPUID leaf 1: processor signature */
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	return __x86_family(eax);
}
|
|
|
|
|
|
|
|
/*
 * Extract the model number from a CPUID(1).EAX-style signature: base
 * model in bits 7:4, with the extended model (bits 19:16) prepended as
 * the high nibble for families 6 and 15.
 */
static inline unsigned int x86_model(unsigned int sig)
{
	unsigned int fam = __x86_family(sig);
	unsigned int mod = (sig >> 4) & 0xf;

	if (fam == 0x6 || fam == 0xf)
		mod += ((sig >> 16) & 0xf) << 4;

	return mod;
}
|
|
|
|
|
2012-12-21 15:44:25 +08:00
|
|
|
/* Early-loading entry points, implemented in the vendor drivers. */
extern void __init load_ucode_bsp(void);		/* load microcode on the boot CPU */
extern void load_ucode_ap(void);			/* load microcode on an AP */
extern int __init save_microcode_in_initrd(void);	/* presumably stashes initrd microcode for later use — confirm in core */
void reload_early_microcode(void);			/* re-apply early microcode; NOTE(review): likely for resume — confirm */
extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);	/* look up firmware built into the kernel image */
|
2012-12-21 15:44:25 +08:00
|
|
|
#else
/* CONFIG_MICROCODE_EARLY=n: early loading compiled out, all stubs below. */
static inline void __init load_ucode_bsp(void) {}
static inline void load_ucode_ap(void) {}
static inline int __init save_microcode_in_initrd(void)
{
	return 0;
}
static inline void reload_early_microcode(void) {}
static inline bool get_builtin_firmware(struct cpio_data *cd, const char *name)
{
	return false;
}
#endif
|
2008-10-23 13:26:29 +08:00
|
|
|
#endif /* _ASM_X86_MICROCODE_H */
|