2008-10-23 13:26:29 +08:00
|
|
|
#ifndef _ASM_X86_MCE_H
|
|
|
|
#define _ASM_X86_MCE_H
|
2007-10-18 00:04:40 +08:00
|
|
|
|
2009-01-31 01:17:27 +08:00
|
|
|
#include <linux/types.h>
|
2007-10-18 00:04:40 +08:00
|
|
|
#include <asm/ioctls.h>
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Machine Check support for x86
|
|
|
|
*/
|
|
|
|
|
2009-04-08 18:31:24 +08:00
|
|
|
/* MCG_CAP register: machine check capabilities reported by the CPU */
#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
#define MCG_EXT_CNT_SHIFT 16
/* Extract the extended-register count from an MCG_CAP value */
#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
|
2007-10-18 00:04:40 +08:00
|
|
|
|
2009-04-08 18:31:25 +08:00
|
|
|
/* MCG_STATUS register: global machine check state */
#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */

/* MCi_STATUS register: per-bank error status */
#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
#define MCI_STATUS_AR (1ULL<<55) /* Action required */
|
|
|
|
|
|
|
|
/*
 * MISC register defines: address-mode encodings for the MCi_MISC
 * register (meaningful when MCI_STATUS_MISCV is set in MCi_STATUS).
 */
#define MCM_ADDR_SEGOFF 0 /* segment offset */
#define MCM_ADDR_LINEAR 1 /* linear address */
#define MCM_ADDR_PHYS 2 /* physical address */
#define MCM_ADDR_MEM 3 /* memory address */
#define MCM_ADDR_GENERIC 7 /* generic */
|
2007-10-18 00:04:40 +08:00
|
|
|
|
|
|
|
/* Fields are zero when not available */
/*
 * One machine check event record. Defined outside the __KERNEL__
 * guard, so this layout is visible to userspace (read through the
 * MCE log character device, see mce_chrdev_ops) and must not change.
 */
struct mce {
	__u64 status;		/* bank status (MCi_STATUS MSR value) */
	__u64 misc;		/* bank MCi_MISC; meaningful if MCI_STATUS_MISCV set in status */
	__u64 addr;		/* bank MCi_ADDR; meaningful if MCI_STATUS_ADDRV set in status */
	__u64 mcgstatus;	/* global status (MCG_STATUS MSR value) */
	__u64 ip;		/* instruction pointer at the time of the error */
	__u64 tsc;		/* cpu time stamp counter */
	__u64 time;		/* wall time_t when error was detected */
	__u8 cpuvendor;		/* cpu vendor as encoded in system.h */
	__u8 pad1;		/* explicit padding keeps the ABI layout stable */
	__u16 pad2;
	__u32 cpuid;		/* CPUID 1 EAX */
	__u8 cs;		/* code segment */
	__u8 bank;		/* machine check bank */
	__u8 cpu;		/* cpu number; obsolete; use extcpu now */
	__u8 finished;		/* entry is valid */
	__u32 extcpu;		/* linux cpu number that detected the error */
	__u32 socketid;		/* CPU socket ID */
	__u32 apicid;		/* CPU initial apic ID */
	__u64 mcgcap;		/* MCGCAP MSR: machine check capabilities of CPU */
};
|
|
|
|
|
|
|
|
/*
 * This structure contains all data related to the MCE log. Also
 * carries a signature to make it easier to find from external
 * debugging tools. Each entry is only valid when its finished flag
 * is set.
 */

#define MCE_LOG_LEN 32

struct mce_log {
	char signature[12];	/* "MACHINECHECK" (MCE_LOG_SIGNATURE) */
	unsigned len;		/* = MCE_LOG_LEN */
	unsigned next;		/* write position in entry[] — presumably; confirm in mce_log() */
	unsigned flags;		/* bit flags, e.g. bit MCE_OVERFLOW */
	unsigned recordlen;	/* length of struct mce */
	struct mce entry[MCE_LOG_LEN];
};
|
|
|
|
|
|
|
|
#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */

#define MCE_LOG_SIGNATURE "MACHINECHECK"

/* ioctls for the MCE log character device (see mce_chrdev_ops) */
#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
#define MCE_GET_LOG_LEN _IOR('M', 2, int)
#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
|
|
|
|
|
|
|
|
/* Software defined banks */
#define MCE_EXTENDED_BANK 128
/*
 * Parenthesize the expansion so MCE_THERMAL_BANK is safe inside
 * larger expressions (the unparenthesized form broke precedence).
 */
#define MCE_THERMAL_BANK (MCE_EXTENDED_BANK + 0)

#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) /* MCE_AMD */
/*
 * AMD K8 threshold banks: 9 software bank numbers reserved per
 * hardware bank. These previously referenced a nonexistent
 * MCE_THRESHOLD_BASE / MCE_THRESHOLD_BANK_4 (missing K8_ prefix)
 * and could never expand; fixed to use the macros defined above.
 */
#define K8_MCE_THRESHOLD_BANK_0 (K8_MCE_THRESHOLD_BASE + 0 * 9)
#define K8_MCE_THRESHOLD_BANK_1 (K8_MCE_THRESHOLD_BASE + 1 * 9)
#define K8_MCE_THRESHOLD_BANK_2 (K8_MCE_THRESHOLD_BASE + 2 * 9)
#define K8_MCE_THRESHOLD_BANK_3 (K8_MCE_THRESHOLD_BASE + 3 * 9)
#define K8_MCE_THRESHOLD_BANK_4 (K8_MCE_THRESHOLD_BASE + 4 * 9)
#define K8_MCE_THRESHOLD_BANK_5 (K8_MCE_THRESHOLD_BASE + 5 * 9)
#define K8_MCE_THRESHOLD_DRAM_ECC (K8_MCE_THRESHOLD_BANK_4 + 0)
|
|
|
|
|
|
|
|
#ifdef __KERNEL__
|
|
|
|
|
2009-06-15 16:22:15 +08:00
|
|
|
#include <linux/percpu.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <asm/atomic.h>
|
|
|
|
|
2007-10-18 00:04:40 +08:00
|
|
|
extern int mce_disabled;
|
|
|
|
|
2009-06-15 16:22:15 +08:00
|
|
|
#ifdef CONFIG_X86_OLD_MCE
|
|
|
|
void amd_mcheck_init(struct cpuinfo_x86 *c);
|
|
|
|
void intel_p4_mcheck_init(struct cpuinfo_x86 *c);
|
|
|
|
void intel_p6_mcheck_init(struct cpuinfo_x86 *c);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef CONFIG_X86_ANCIENT_MCE
/* Pentium (P5) and WinChip machine check support */
void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
void winchip_mcheck_init(struct cpuinfo_x86 *c);
extern int mce_p5_enable;
/* Query / set the P5 machine check enable flag */
static inline int mce_p5_enabled(void) { return mce_p5_enable; }
static inline void enable_p5_mce(void) { mce_p5_enable = 1; }
#else
/* Stubs so callers need no #ifdefs when ancient-CPU support is off */
static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
static inline int mce_p5_enabled(void) { return 0; }
static inline void enable_p5_mce(void) { }
#endif
|
|
|
|
|
|
|
|
/* Call the installed machine check handler for this CPU setup. */
extern void (*machine_check_vector)(struct pt_regs *, long error_code);

#ifdef CONFIG_X86_OLD_MCE
extern int nr_mce_banks;	/* number of machine check banks detected */
extern void intel_set_thermal_handler(void);
#else
/* Stub so callers need no #ifdef when the old MCE code is off */
static inline void intel_set_thermal_handler(void) { }
#endif
|
|
|
|
|
|
|
|
void intel_init_thermal(struct cpuinfo_x86 *c);

/* Initialize a struct mce — presumably fills the CPU-identification
 * fields (extcpu, cpuid, ...); confirm against the definition in mce.c */
void mce_setup(struct mce *m);
/* Append an event to the MCE log */
void mce_log(struct mce *m);
/* Per-CPU sysfs device for machine check attributes */
DECLARE_PER_CPU(struct sys_device, mce_dev);
|
/*
 * CPU hotplug hook for the AMD threshold-bank sysfs entries.
 * The per-bank sysfs directories (threshold_create_bank(), mce_amd_64.c)
 * must be created after — and removed before — the parent directory
 * made by mce_create_device(); invoking this callback from
 * mce_cpu_callback() enforces that ordering (fixes bug #11337).
 */
2008-08-23 04:23:09 +08:00
|
|
|
extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
|
2007-10-18 00:04:40 +08:00
|
|
|
|
2009-02-12 20:49:30 +08:00
|
|
|
/*
 * To support more than 128 would need to escape the predefined
 * Linux defined extended banks first.
 */
#define MAX_NR_BANKS (MCE_EXTENDED_BANK - 1)
|
|
|
|
|
2007-10-18 00:04:40 +08:00
|
|
|
#ifdef CONFIG_X86_MCE_INTEL
/* Tunables — presumably set from boot parameters; confirm in mce.c */
extern int mce_cmci_disabled;
extern int mce_ignore_ce;
void mce_intel_feature_init(struct cpuinfo_x86 *c);
/* CMCI (Corrected Machine Check Interrupt) management */
void cmci_clear(void);
void cmci_reenable(void);
void cmci_rediscover(int dying);
void cmci_recheck(void);
#else
/* Stubs so callers need no #ifdefs when Intel MCE support is off */
static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
static inline void cmci_clear(void) {}
static inline void cmci_reenable(void) {}
static inline void cmci_rediscover(int dying) {}
static inline void cmci_recheck(void) {}
#endif

#ifdef CONFIG_X86_MCE_AMD
void mce_amd_feature_init(struct cpuinfo_x86 *c);
#else
static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
#endif
|
|
|
|
|
2009-05-29 01:05:33 +08:00
|
|
|
/* Is machine check support available/usable on CPU c? */
int mce_available(struct cpuinfo_x86 *c);

/* Per-CPU event counters — names suggest #MC exceptions taken and
 * poll passes run; confirm against the updates in mce.c */
DECLARE_PER_CPU(unsigned, mce_exception_count);
DECLARE_PER_CPU(unsigned, mce_poll_count);

/* Log a thermal throttling event with the given MCi_STATUS-style value */
void mce_log_therm_throt_event(__u64 status);
|
2007-10-18 00:04:40 +08:00
|
|
|
|
|
|
|
/* NOTE(review): appears to count CPUs currently inside the MCE
 * handler — confirm against uses in mce.c */
extern atomic_t mce_entry;

/* The machine check exception handler entry point */
void do_machine_check(struct pt_regs *, long);

/* Bitmap with one bit per possible machine check bank */
typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
/* Banks this CPU scans on the polling path */
DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
|
|
|
|
|
2009-02-12 20:43:23 +08:00
|
|
|
/* Flags controlling machine_check_poll() behavior */
enum mcp_flags {
	MCP_TIMESTAMP = (1 << 0),	/* log time stamp */
	MCP_UC = (1 << 1),		/* log uncorrected errors */
	MCP_DONTLOG = (1 << 2),		/* only clear, don't log */
};
/* Poll the banks selected in *b for events (non-exception path) */
void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
|
2009-02-12 20:43:23 +08:00
|
|
|
|
2009-05-28 03:56:58 +08:00
|
|
|
int mce_notify_irq(void);
|
/*
 * Process-context part of handling "action optional" machine checks
 * (recoverable corruption reported via #MC on newer Intel CPUs).
 * The exception handler queues the corrupted addresses in a lock-less
 * ring buffer and schedules this function — via a user-return notifier
 * and a high-priority work item, whichever runs first — which then
 * invokes the high level memory failure handler on the queued PFNs.
 * It must run in process context because the VM code may sleep.
 */
2009-05-28 03:56:59 +08:00
|
|
|
void mce_notify_process(void);
|
2007-10-18 00:04:40 +08:00
|
|
|
|
2009-04-30 01:31:00 +08:00
|
|
|
/* Per-CPU injected error record — name suggests use by the MCE
 * injector; confirm against mce-inject code */
DECLARE_PER_CPU(struct mce, injectm);
/* File operations for the MCE log character device */
extern struct file_operations mce_chrdev_ops;
|
|
|
|
|
2008-01-30 20:30:17 +08:00
|
|
|
#ifdef CONFIG_X86_MCE
/* Initialize machine check handling for CPU c */
void mcheck_init(struct cpuinfo_x86 *c);
#else
/* No-op when machine check support is not configured */
#define mcheck_init(c) do { } while (0)
#endif
|
2007-10-18 00:04:40 +08:00
|
|
|
|
2009-02-12 20:49:31 +08:00
|
|
|
extern void (*mce_threshold_vector)(void);
|
|
|
|
|
2007-10-18 00:04:40 +08:00
|
|
|
#endif /* __KERNEL__ */
|
2008-10-23 13:26:29 +08:00
|
|
|
#endif /* _ASM_X86_MCE_H */
|