mce-severity.c 9.8 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12
/*
 * MCE grading rules.
 * Copyright 2008, 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * Author: Andi Kleen
 */
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <asm/mce.h>
#include <asm/uaccess.h>

#include "mce-internal.h"

/*
 * Grade an mce by severity. In general the most severe ones are processed
 * first. Since there are quite a lot of combinations test the bits in a
 * table-driven way. The rules are simply processed in order, first
 * match wins.
 *
 * Note this is only used for machine check exceptions, the corrected
 * errors use much simpler rules. The exceptions still check for the corrected
 * errors, but only to leave them alone for the CMCI handler (except for
 * panic situations)
 */

33
/* Execution context in which the machine check was observed. */
enum context { IN_KERNEL = 1, IN_USER = 2, IN_KERNEL_RECOV = 3 };

/* Whether a rule applies only with or without SER (software error recovery). */
enum ser { SER_REQUIRED = 1, NO_SER = 2 };

/* Whether the error was seen in a machine check exception or while polling. */
enum exception { EXCP_CONTEXT = 1, NO_EXCP = 2 };

37 38 39 40 41 42
static struct severity {
	u64 mask;
	u64 result;
	unsigned char sev;
	unsigned char mcgmask;
	unsigned char mcgres;
A
Andi Kleen 已提交
43 44
	unsigned char ser;
	unsigned char context;
45
	unsigned char excp;
46
	unsigned char covered;
47 48
	char *msg;
} severities[] = {
49 50 51
#define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
#define  KERNEL		.context = IN_KERNEL
#define  USER		.context = IN_USER
52
#define  KERNEL_RECOV	.context = IN_KERNEL_RECOV
53 54
#define  SER		.ser = SER_REQUIRED
#define  NOSER		.ser = NO_SER
55 56
#define  EXCP		.excp = EXCP_CONTEXT
#define  NOEXCP		.excp = NO_EXCP
57 58 59 60
#define  BITCLR(x)	.mask = x, .result = 0
#define  BITSET(x)	.mask = x, .result = x
#define  MCGMASK(x, y)	.mcgmask = x, .mcgres = y
#define  MASK(x, y)	.mask = x, .result = y
A
Andi Kleen 已提交
61 62
#define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S)
#define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR)
63
#define	MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV)
A
Andi Kleen 已提交
64

65 66 67
	MCESEV(
		NO, "Invalid",
		BITCLR(MCI_STATUS_VAL)
68
		),
69 70
	MCESEV(
		NO, "Not enabled",
71
		EXCP, BITCLR(MCI_STATUS_EN)
72
		),
73 74 75
	MCESEV(
		PANIC, "Processor context corrupt",
		BITSET(MCI_STATUS_PCC)
76
		),
A
Andi Kleen 已提交
77
	/* When MCIP is not set something is very confused */
78 79
	MCESEV(
		PANIC, "MCIP not set in MCA handler",
80
		EXCP, MCGMASK(MCG_STATUS_MCIP, 0)
81
		),
A
Andi Kleen 已提交
82
	/* Neither return not error IP -- no chance to recover -> PANIC */
83 84
	MCESEV(
		PANIC, "Neither restart nor error IP",
85
		EXCP, MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, 0)
86
		),
87
	MCESEV(
88
		PANIC, "In kernel and no restart IP",
89 90
		EXCP, KERNEL, MCGMASK(MCG_STATUS_RIPV, 0)
		),
91 92 93 94
	MCESEV(
		PANIC, "In kernel and no restart IP",
		EXCP, KERNEL_RECOV, MCGMASK(MCG_STATUS_RIPV, 0)
		),
95 96 97
	MCESEV(
		DEFERRED, "Deferred error",
		NOSER, MASK(MCI_STATUS_UC|MCI_STATUS_DEFERRED|MCI_STATUS_POISON, MCI_STATUS_DEFERRED)
98
		),
99
	MCESEV(
100
		KEEP, "Corrected error",
101
		NOSER, BITCLR(MCI_STATUS_UC)
102
		),
A
Andi Kleen 已提交
103 104

	/* ignore OVER for UCNA */
105
	MCESEV(
106
		UCNA, "Uncorrected no action required",
107
		SER, MASK(MCI_UC_SAR, MCI_STATUS_UC)
108
		),
109
	MCESEV(
110
		PANIC, "Illegal combination (UCNA with AR=1)",
111 112
		SER,
		MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR)
113
		),
114
	MCESEV(
115
		KEEP, "Non signalled machine check",
116
		SER, BITCLR(MCI_STATUS_S)
117
		),
A
Andi Kleen 已提交
118

119
	MCESEV(
120
		PANIC, "Action required with lost events",
121
		SER, BITSET(MCI_STATUS_OVER|MCI_UC_SAR)
122
		),
123 124 125 126

	/* known AR MCACODs: */
#ifdef	CONFIG_MEMORY_FAILURE
	MCESEV(
127
		KEEP, "Action required but unaffected thread is continuable",
128 129
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR, MCI_UC_SAR|MCI_ADDR),
		MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, MCG_STATUS_RIPV)
130
		),
131 132 133 134 135
	MCESEV(
		AR, "Action required: data load in error recoverable area of kernel",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
		KERNEL_RECOV
		),
136
	MCESEV(
137
		AR, "Action required: data load error in a user process",
138
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
139 140
		USER
		),
141
	MCESEV(
142
		AR, "Action required: instruction fetch error in a user process",
143 144 145
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
		USER
		),
146
#endif
147
	MCESEV(
148
		PANIC, "Action required: unknown MCACOD",
149
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_SAR)
150
		),
A
Andi Kleen 已提交
151 152

	/* known AO MCACODs: */
153
	MCESEV(
154
		AO, "Action optional: memory scrubbing error",
155
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD_SCRUBMSK, MCI_UC_S|MCACOD_SCRUB)
156
		),
157
	MCESEV(
158
		AO, "Action optional: last level cache writeback error",
159
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD, MCI_UC_S|MCACOD_L3WB)
160
		),
161
	MCESEV(
162
		SOME, "Action optional: unknown MCACOD",
163
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S)
164
		),
165
	MCESEV(
166
		SOME, "Action optional with lost events",
167
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_OVER|MCI_UC_S)
168
		),
169 170 171

	MCESEV(
		PANIC, "Overflowed uncorrected",
172
		BITSET(MCI_STATUS_OVER|MCI_STATUS_UC)
173
		),
174 175 176
	MCESEV(
		UC, "Uncorrected",
		BITSET(MCI_STATUS_UC)
177
		),
178 179 180
	MCESEV(
		SOME, "No match",
		BITSET(0)
181
		)	/* always matches. keep at end */
182 183
};

184 185 186
/*
 * True when both the restart IP (RIPV) and error IP (EIPV) bits are set
 * in mcgstatus, i.e. the saved IP on the stack can be trusted.
 */
#define mc_recoverable(mcg) (((mcg) & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) == \
				(MCG_STATUS_RIPV|MCG_STATUS_EIPV))

A
Andi Kleen 已提交
187
/*
188 189 190 191 192 193 194 195 196
 * If mcgstatus indicated that ip/cs on the stack were
 * no good, then "m->cs" will be zero and we will have
 * to assume the worst case (IN_KERNEL) as we actually
 * have no idea what we were executing when the machine
 * check hit.
 * If we do have a good "m->cs" (or a faked one in the
 * case we were executing in VM86 mode) we can use it to
 * distinguish an exception taken in user from from one
 * taken in the kernel.
A
Andi Kleen 已提交
197 198 199
 */
static int error_context(struct mce *m)
{
200 201 202 203 204
	if ((m->cs & 3) == 3)
		return IN_USER;
	if (mc_recoverable(m->mcgstatus) && ex_has_fault_handler(m->ip))
		return IN_KERNEL_RECOV;
	return IN_KERNEL;
A
Andi Kleen 已提交
205 206
}

207 208 209 210
/*
 * See AMD Error Scope Hierarchy table in a newer BKDG. For example
 * 49125_15h_Models_30h-3Fh_BKDG.pdf, section "RAS Features"
 */
211
static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_excp)
212
{
213 214
	enum context ctx = error_context(m);

215 216 217 218 219 220 221 222 223 224 225 226 227 228
	/* Processor Context Corrupt, no need to fumble too much, die! */
	if (m->status & MCI_STATUS_PCC)
		return MCE_PANIC_SEVERITY;

	if (m->status & MCI_STATUS_UC) {

		/*
		 * On older systems where overflow_recov flag is not present, we
		 * should simply panic if an error overflow occurs. If
		 * overflow_recov flag is present and set, then software can try
		 * to at least kill process to prolong system operation.
		 */
		if (mce_flags.overflow_recov) {
			/* software can try to contain */
229 230
			if (!(m->mcgstatus & MCG_STATUS_RIPV) && (ctx == IN_KERNEL))
				return MCE_PANIC_SEVERITY;
231

232 233
			/* kill current process */
			return MCE_AR_SEVERITY;
234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260
		} else {
			/* at least one error was not logged */
			if (m->status & MCI_STATUS_OVER)
				return MCE_PANIC_SEVERITY;
		}

		/*
		 * For any other case, return MCE_UC_SEVERITY so that we log the
		 * error and exit #MC handler.
		 */
		return MCE_UC_SEVERITY;
	}

	/*
	 * deferred error: poll handler catches these and adds to mce_ring so
	 * memory-failure can take recovery actions.
	 */
	if (m->status & MCI_STATUS_DEFERRED)
		return MCE_DEFERRED_SEVERITY;

	/*
	 * corrected error: poll handler catches these and passes responsibility
	 * of decoding the error to EDAC
	 */
	return MCE_KEEP_SEVERITY;
}

261
/*
 * Grade an Intel machine check by scanning the severities[] table;
 * the first matching rule decides.  The table is guaranteed to
 * terminate because its last entry matches everything.
 */
static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_excp)
{
	enum exception excp = (is_excp ? EXCP_CONTEXT : NO_EXCP);
	enum context ctx = error_context(m);
	struct severity *s;

	for (s = severities;; s++) {
		if ((m->status & s->mask) != s->result)
			continue;
		if ((m->mcgstatus & s->mcgmask) != s->mcgres)
			continue;
		/* Skip rules restricted to the other SER capability. */
		if (s->ser == SER_REQUIRED && !mca_cfg.ser)
			continue;
		if (s->ser == NO_SER && mca_cfg.ser)
			continue;
		if (s->context && ctx != s->context)
			continue;
		if (s->excp && excp != s->excp)
			continue;
		if (msg)
			*msg = s->msg;
		/* Record the hit for the severities-coverage debugfs file. */
		s->covered = 1;
		/* Uncorrected errors in the kernel escalate to panic. */
		if (s->sev >= MCE_UC_SEVERITY && ctx == IN_KERNEL) {
			if (panic_on_oops || tolerant < 1)
				return MCE_PANIC_SEVERITY;
		}
		return s->sev;
	}
}
290

291 292 293 294 295 296 297 298 299 300
/*
 * Vendor-specific grading hook.  Default to mce_severity_intel;
 * mcheck_vendor_init_severity() swaps in the AMD variant when needed.
 */
int (*mce_severity)(struct mce *m, int tolerant, char **msg, bool is_excp) =
		    mce_severity_intel;

/* Install the AMD grading routine when booting on an AMD CPU. */
void __init mcheck_vendor_init_severity(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		mce_severity = mce_severity_amd;
}

301
#ifdef CONFIG_DEBUG_FS
302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353
/* seq_file start: return the rule at *pos, or NULL past the table end. */
static void *s_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < ARRAY_SIZE(severities)) ? &severities[*pos] : NULL;
}

/* seq_file next: advance *pos and return the next rule, or NULL at the end. */
static void *s_next(struct seq_file *f, void *data, loff_t *pos)
{
	++*pos;
	return (*pos < ARRAY_SIZE(severities)) ? &severities[*pos] : NULL;
}

/* seq_file stop: nothing to release. */
static void s_stop(struct seq_file *f, void *data)
{
}

/* Emit one line per rule: covered flag, a tab, then the rule message. */
static int s_show(struct seq_file *f, void *data)
{
	struct severity *s = data;

	seq_printf(f, "%d\t%s\n", s->covered, s->msg);
	return 0;
}

/* Iterator over severities[] for the severities-coverage debugfs file. */
static const struct seq_operations severities_seq_ops = {
	.start	= s_start,
	.next	= s_next,
	.stop	= s_stop,
	.show	= s_show,
};

/* Open handler: attach the severities table iterator to this file. */
static int severities_coverage_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &severities_seq_ops);
}

/*
 * Any write to the severities-coverage file resets all coverage
 * counters; the written data itself is ignored.
 */
static ssize_t severities_coverage_write(struct file *file,
					 const char __user *ubuf,
					 size_t count, loff_t *ppos)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(severities); idx++)
		severities[idx].covered = 0;

	return count;
}

static const struct file_operations severities_coverage_fops = {
	.open		= severities_coverage_open,
	.release	= seq_release,
	.read		= seq_read,
	.write		= severities_coverage_write,
354
	.llseek		= seq_lseek,
355 356 357 358
};

/*
 * Create the "severities-coverage" file under the MCE debugfs directory.
 * Returns 0 on success, -ENOMEM when the directory or file could not be
 * created.
 */
static int __init severities_debugfs_init(void)
{
	struct dentry *dmce, *fsev;

	dmce = mce_get_debugfs_dir();
	if (!dmce)
		goto err_out;

	fsev = debugfs_create_file("severities-coverage", 0444, dmce, NULL,
				   &severities_coverage_fops);
	if (!fsev)
		goto err_out;

	return 0;

err_out:
	return -ENOMEM;
}
late_initcall(severities_debugfs_init);
376
#endif /* CONFIG_DEBUG_FS */