/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_MCE_INTERNAL_H__
#define __X86_MCE_INTERNAL_H__

#include <linux/device.h>
#include <asm/mce.h>

/*
 * Internal severity grades for a machine-check record. NOTE(review):
 * values appear ordered by increasing severity (MCE_NO_SEVERITY ..
 * MCE_PANIC_SEVERITY) — confirm against the grading code that consumes
 * mce_severity().
 */
enum severity_level {
	MCE_NO_SEVERITY,
	MCE_DEFERRED_SEVERITY,
	MCE_UCNA_SEVERITY = MCE_DEFERRED_SEVERITY,	/* UCNA graded the same as deferred */
	MCE_KEEP_SEVERITY,
	MCE_SOME_SEVERITY,
	MCE_AO_SEVERITY,
	MCE_UC_SEVERITY,
	MCE_AR_SEVERITY,
	MCE_PANIC_SEVERITY,
};

/* Notifier chain through which decoded MCE records are published. */
extern struct blocking_notifier_head x86_mce_decoder_chain;

#define ATTR_LEN		16
/* Parenthesized so the macro expands safely inside larger expressions. */
#define INITIAL_CHECK_INTERVAL	(5 * 60) /* 5 minutes */
24 25 26 27 28

/* One object for each MCE bank, shared by all CPUs */
struct mce_bank {
	u64			ctl;			/* subevents to enable */
	unsigned char init;				/* initialise bank? */
29
	struct device_attribute attr;			/* device attribute */
30 31 32
	char			attrname[ATTR_LEN];	/* attribute name */
};

33 34 35 36 37
struct mce_evt_llist {
	struct llist_node llnode;
	struct mce mce;
};

38
void mce_gen_pool_process(struct work_struct *__unused);
39 40 41
bool mce_gen_pool_empty(void);
int mce_gen_pool_add(struct mce *mce);
int mce_gen_pool_init(void);
42
struct llist_node *mce_gen_pool_prepare_records(void);
43

44
extern int (*mce_severity)(struct mce *a, int tolerant, char **msg, bool is_excp);
45
struct dentry *mce_get_debugfs_dir(void);
46

47
extern struct mce_bank *mce_banks;
48
extern mce_banks_t mce_banks_ce_disabled;
49

/*
 * Intel CMCI (Corrected Machine Check Interrupt) support; stubbed out
 * when CONFIG_X86_MCE_INTEL is not enabled.
 */
#ifdef CONFIG_X86_MCE_INTEL
unsigned long cmci_intel_adjust_timer(unsigned long interval);
bool mce_intel_cmci_poll(void);
void mce_intel_hcpu_update(unsigned long cpu);
void cmci_disable_bank(int bank);
#else
# define cmci_intel_adjust_timer mce_adjust_timer_default
static inline bool mce_intel_cmci_poll(void) { return false; }
static inline void mce_intel_hcpu_update(unsigned long cpu) { }
static inline void cmci_disable_bank(int bank) { }
#endif

void mce_timer_kick(unsigned long interval);

/*
 * APEI (ACPI Platform Error Interface) persistence of MCE records.
 * Without CONFIG_ACPI_APEI the write/clear paths report -EINVAL and the
 * read/check paths report "nothing available" (0).
 */
#ifdef CONFIG_ACPI_APEI
int apei_write_mce(struct mce *m);
ssize_t apei_read_mce(struct mce *m, u64 *record_id);
int apei_check_mce(void);
int apei_clear_mce(u64 record_id);
#else
static inline int apei_write_mce(struct mce *m)
{
	return -EINVAL;
}
static inline ssize_t apei_read_mce(struct mce *m, u64 *record_id)
{
	return 0;
}
static inline int apei_check_mce(void)
{
	return 0;
}
static inline int apei_clear_mce(u64 record_id)
{
	return -EINVAL;
}
#endif

void mce_inject_log(struct mce *m);

/*
 * Returns true when two MCE records differ in any of bank, status, addr
 * or misc — i.e. false means "effectively the same record". Used only on
 * the fatal-error panic path to suppress near-duplicate console output;
 * during normal processing every record is logged.
 */
static inline bool mce_cmp(struct mce *m1, struct mce *m2)
{
	if (m1->bank != m2->bank)
		return true;
	if (m1->status != m2->status)
		return true;
	if (m1->addr != m2->addr)
		return true;
	return m1->misc != m2->misc;
}
extern struct device_attribute dev_attr_trigger;

/*
 * /dev/mcelog legacy interface hooks; no-op stubs when
 * CONFIG_X86_MCELOG_LEGACY is disabled.
 */
#ifdef CONFIG_X86_MCELOG_LEGACY
void mce_work_trigger(void);
void mce_register_injector_chain(struct notifier_block *nb);
void mce_unregister_injector_chain(struct notifier_block *nb);
#else
static inline void mce_work_trigger(void)	{ }
static inline void mce_register_injector_chain(struct notifier_block *nb)	{ }
static inline void mce_unregister_injector_chain(struct notifier_block *nb)	{ }
#endif
#ifndef CONFIG_X86_64
/*
 * On 32-bit systems it would be difficult to safely unmap a poison page
 * from the kernel 1:1 map because there are no non-canonical addresses that
 * we can use to refer to the address without risking a speculative access.
 * However, this isn't much of an issue because:
 * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM which
 *    are only mapped into the kernel as needed
 * 2) Few people would run a 32-bit kernel on a machine that supports
 *    recoverable errors because they have too much memory to boot 32-bit.
 */
static inline void mce_unmap_kpfn(unsigned long pfn) {}
#define mce_unmap_kpfn mce_unmap_kpfn
#endif
131 132 133 134
struct mca_config {
	bool dont_log_ce;
	bool cmci_disabled;
	bool ignore_ce;
135 136 137 138 139 140 141 142

	__u64 lmce_disabled		: 1,
	      disabled			: 1,
	      ser			: 1,
	      recovery			: 1,
	      bios_cmci_threshold	: 1,
	      __reserved		: 59;

143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187
	u8 banks;
	s8 bootlog;
	int tolerant;
	int monarch_timeout;
	int panic_timeout;
	u32 rip_msr;
};

extern struct mca_config mca_cfg;

/* Vendor-specific MCA capability flags, packed into a single 64-bit word. */
struct mce_vendor_flags {
	/*
	 * Indicates that overflow conditions are not fatal, when set.
	 */
	__u64 overflow_recov	: 1,

	/*
	 * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and
	 * Recovery. It indicates support for data poisoning in HW and deferred
	 * error interrupts.
	 */
	      succor		: 1,

	/*
	 * (AMD) SMCA: This bit indicates support for Scalable MCA which expands
	 * the register space for each MCA bank and also increases number of
	 * banks. Also, to accommodate the new banks and registers, the MCA
	 * register space is moved to a new MSR range.
	 */
	      smca		: 1,

	      __reserved_0	: 61;
};

/* Single global instance describing the boot CPU's vendor capabilities. */
extern struct mce_vendor_flags mce_flags;

/*
 * Per-bank MSR accessor table: each callback takes a bank index and
 * returns a u32. NOTE(review): presumably the returned value is the MSR
 * number for that bank's CTL/STATUS/ADDR/MISC register (legacy vs. SMCA
 * layouts differ) — confirm against the msr_ops initializers.
 */
struct mca_msr_regs {
	u32 (*ctl)	(int bank);
	u32 (*status)	(int bank);
	u32 (*addr)	(int bank);
	u32 (*misc)	(int bank);
};

extern struct mca_msr_regs msr_ops;

#endif /* __X86_MCE_INTERNAL_H__ */