/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>

#include <asm/processor.h>
#include <asm/mce.h>
#include <asm/msr.h>

#include "mce-internal.h"

static DEFINE_MUTEX(mce_chrdev_read_mutex);

#define rcu_dereference_check_mce(p) \
	rcu_dereference_index_check((p), \
			      rcu_read_lock_sched_held() || \
			      lockdep_is_held(&mce_chrdev_read_mutex))
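/*
 * For illustration: readers of mcelog.next are expected to hold either
 * rcu_read_lock_sched() or mce_chrdev_read_mutex; mce_chrdev_read() below,
 * for example, does:
 *
 *	mutex_lock(&mce_chrdev_read_mutex);
 *	next = rcu_dereference_check_mce(mcelog.next);
 */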

#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

#define SPINUNIT 100	/* 100ns */

atomic_t mce_entry;

DEFINE_PER_CPU(unsigned, mce_exception_count);

struct mce_bank *mce_banks __read_mostly;

struct mca_config mca_cfg __read_mostly = {
	.bootlog  = -1,
	/*
	 * Tolerant levels:
	 * 0: always panic on uncorrected errors, log corrected errors
	 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
	 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
	 * 3: never panic or SIGBUS, log all errors (for testing only)
	 */
	.tolerant = 1,
	.monarch_timeout = -1
};

/* User mode helper program triggered by machine check event */
static unsigned long		mce_need_notify;
static char			mce_helper[128];
static char			*mce_helper_argv[2] = { mce_helper, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);

static DEFINE_PER_CPU(struct mce, mces_seen);
static int			cpu_missing;

/* MCA banks polled by the period polling timer for corrected events */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

static DEFINE_PER_CPU(struct work_struct, mce_work);

static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);

/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);

/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	rdtscll(m->tsc);
	/* We hope get_seconds stays lockless */
	m->time = get_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
	m->socketid = cpu_data(m->extcpu).phys_proc_id;
	m->apicid = cpu_data(m->extcpu).initial_apicid;
	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separate MCEs from kernel messages to avoid bogus bug reports.
 */
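/*
 * Sketch of the protocol used by mce_log() below: a writer reserves a slot
 * by advancing mcelog.next with cmpxchg(), fills in the entry, and only
 * then sets entry->finished behind a wmb(); readers must wait for
 * ->finished before trusting an entry's contents.
 */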

static struct mce_log mcelog = {
	.signature	= MCE_LOG_SIGNATURE,
	.len		= MCE_LOG_LEN,
	.recordlen	= sizeof(struct mce),
};

void mce_log(struct mce *mce)
{
	unsigned next, entry;
	int ret = 0;

	/* Emit the trace record: */
	trace_mce_record(mce);

	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
	if (ret == NOTIFY_STOP)
		return;

	mce->finished = 0;
	wmb();
	for (;;) {
		entry = rcu_dereference_check_mce(mcelog.next);
		for (;;) {

			/*
			 * When the buffer fills up discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	mce->finished = 1;
	set_bit(0, &mce_need_notify);
}

static void drain_mcelog_buffer(void)
{
	unsigned int next, i, prev = 0;

	next = ACCESS_ONCE(mcelog.next);

	do {
		struct mce *m;

		/* drain what was logged during boot */
		for (i = prev; i < next; i++) {
			unsigned long start = jiffies;
			unsigned retries = 1;

			m = &mcelog.entry[i];

			while (!m->finished) {
				if (time_after_eq(jiffies, start + 2*retries))
					retries++;

				cpu_relax();

				if (!m->finished && retries >= 4) {
					pr_err("skipping error being logged currently!\n");
					break;
				}
			}
			smp_rmb();
			atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
		}

		memset(mcelog.entry + prev, 0, (next - prev) * sizeof(*m));
		prev = next;
		next = cmpxchg(&mcelog.next, prev, 0);
	} while (next != prev);
}


void mce_register_decode_chain(struct notifier_block *nb)
{
	atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
	drain_mcelog_buffer();
}
EXPORT_SYMBOL_GPL(mce_register_decode_chain);
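/*
 * Hypothetical usage sketch (the names below are made up, not part of this
 * file): an EDAC driver can hook the decode chain to pretty-print errors:
 *
 *	static int my_mce_decode(struct notifier_block *nb,
 *				 unsigned long val, void *data)
 *	{
 *		struct mce *m = data;
 *
 *		pr_info("bank %d status %llx\n", m->bank, m->status);
 *		return NOTIFY_STOP;	// suppress the generic printout
 *	}
 *
 *	static struct notifier_block my_mce_nb = {
 *		.notifier_call	= my_mce_decode,
 *	};
 *
 *	mce_register_decode_chain(&my_mce_nb);
 */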

void mce_unregister_decode_chain(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);

static void print_mce(struct mce *m)
{
	int ret = 0;

	pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
	       m->extcpu, m->mcgstatus, m->bank, m->status);

	if (m->ip) {
		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
				m->cs, m->ip);

		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		pr_cont("\n");
	}

	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
	if (m->addr)
		pr_cont("ADDR %llx ", m->addr);
	if (m->misc)
		pr_cont("MISC %llx ", m->misc);

	pr_cont("\n");
	/*
	 * Note this output is parsed by external tools and old fields
	 * should not be changed.
	 */
	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
		cpu_data(m->extcpu).microcode);

	/*
	 * Print out human-readable details about the MCE error
	 * (if the CPU has an implementation for that)
	 */
	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
	if (ret == NOTIFY_STOP)
		return;

	pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}

#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_paniced;

static int fake_panic;
static atomic_t mce_fake_paniced;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

	preempt_disable();
	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	if (panic_timeout == 0)
		panic_timeout = mca_cfg.panic_timeout;
	panic("Panicing machine check CPU died");
}

static void mce_panic(char *msg, struct mce *final, char *exp)
{
	int i, apei_err = 0;

	if (!fake_panic) {
		/*
		 * Make sure only one CPU runs in machine check panic
		 */
		if (atomic_inc_return(&mce_paniced) > 1)
			wait_for_panic();
		barrier();

		bust_spinlocks(1);
		console_verbose();
	} else {
		/* Don't log too much for fake panic */
		if (atomic_inc_return(&mce_fake_paniced) > 1)
			return;
	}
	/* First print corrected ones that are still unlogged */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	/* Now print uncorrected but with the final one last */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC))
			continue;
		if (!final || memcmp(m, final, sizeof(struct mce))) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	if (final) {
		print_mce(final);
		if (!apei_err)
			apei_err = apei_write_mce(final);
	}
	if (cpu_missing)
		pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
	if (exp)
		pr_emerg(HW_ERR "Machine check: %s\n", exp);
	if (!fake_panic) {
		if (panic_timeout == 0)
			panic_timeout = mca_cfg.panic_timeout;
		panic(msg);
	} else
		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
}

/* Support code for software error injection */

static int msr_to_offset(u32 msr)
{
	unsigned bank = __this_cpu_read(injectm.bank);

	if (msr == mca_cfg.rip_msr)
		return offsetof(struct mce, ip);
	if (msr == MSR_IA32_MCx_STATUS(bank))
		return offsetof(struct mce, status);
	if (msr == MSR_IA32_MCx_ADDR(bank))
		return offsetof(struct mce, addr);
	if (msr == MSR_IA32_MCx_MISC(bank))
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}

/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
	u64 v;

	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset < 0)
			return 0;
		return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
	}

	if (rdmsrl_safe(msr, &v)) {
		WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
		/*
		 * Return zero in case the access faulted. This should
		 * not happen normally but can happen if the CPU does
		 * something weird, or if the code is buggy.
		 */
		v = 0;
	}

	return v;
}
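/*
 * For illustration: with injectm.finished set on this CPU, a read of
 * MSR_IA32_MCx_STATUS(bank) above is served from injectm.status rather
 * than from real hardware; see msr_to_offset() for the mapping.
 */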

static void mce_wrmsrl(u32 msr, u64 v)
{
	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset >= 0)
			*(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
		return;
	}
	wrmsrl(msr, v);
}

/*
 * Collect all global (w.r.t. this processor) status about this machine
 * check into our "mce" struct so that we can use it later to assess
 * the severity of the problem as we read per-bank specific details.
 */
static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
	mce_setup(m);

	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	if (regs) {
		/*
		 * Get the address of the instruction at the time of
		 * the machine check error.
		 */
		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
			m->ip = regs->ip;
			m->cs = regs->cs;
436 437 438 439 440 441 442 443

			/*
			 * When in VM86 mode make the cs look like ring 3
			 * always. This is a lie, but it's better than passing
			 * the additional vm86 bit around everywhere.
			 */
			if (v8086_mode(regs))
				m->cs |= 3;
		}
		/* Use accurate RIP reporting if available. */
		if (mca_cfg.rip_msr)
			m->ip = mce_rdmsrl(mca_cfg.rip_msr);
	}
}

/*
 * Simple lockless ring to communicate PFNs from the exception handler with the
 * process context work function. This is vastly simplified because there's
 * only a single reader and a single writer.
 */
#define MCE_RING_SIZE 16	/* we use one entry less */

struct mce_ring {
	unsigned short start;
	unsigned short end;
	unsigned long ring[MCE_RING_SIZE];
};
static DEFINE_PER_CPU(struct mce_ring, mce_ring);
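/*
 * Descriptive note: the ring counts as full when advancing ->end would
 * make it equal to ->start, so at most MCE_RING_SIZE - 1 = 15 PFNs can be
 * queued -- that is the "one entry less" mentioned above.
 */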

/* Runs with CPU affinity in workqueue */
static int mce_ring_empty(void)
{
	struct mce_ring *r = &__get_cpu_var(mce_ring);

	return r->start == r->end;
}

static int mce_ring_get(unsigned long *pfn)
{
	struct mce_ring *r;
	int ret = 0;

	*pfn = 0;
	get_cpu();
	r = &__get_cpu_var(mce_ring);
	if (r->start == r->end)
		goto out;
	*pfn = r->ring[r->start];
	r->start = (r->start + 1) % MCE_RING_SIZE;
	ret = 1;
out:
	put_cpu();
	return ret;
}

/* Always runs in MCE context with preempt off */
static int mce_ring_add(unsigned long pfn)
{
	struct mce_ring *r = &__get_cpu_var(mce_ring);
	unsigned next;

	next = (r->end + 1) % MCE_RING_SIZE;
	if (next == r->start)
		return -1;
	r->ring[r->end] = pfn;
	wmb();
	r->end = next;
	return 0;
}

int mce_available(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
	if (!mce_ring_empty()) {
		struct work_struct *work = &__get_cpu_var(mce_work);
		if (!work_pending(work))
			schedule_work(work);
	}
}

DEFINE_PER_CPU(struct irq_work, mce_irq_work);

static void mce_irq_work_cb(struct irq_work *entry)
{
	mce_notify_irq();
	mce_schedule_work();
}

static void mce_report_event(struct pt_regs *regs)
{
	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
		mce_notify_irq();
		/*
		 * Triggering the work queue here is just an insurance
		 * policy in case the syscall exit notify handler
		 * doesn't run soon enough or ends up running on the
		 * wrong CPU (can happen when audit sleeps)
		 */
		mce_schedule_work();
		return;
	}

	irq_work_queue(&__get_cpu_var(mce_irq_work));
}

/*
 * Read ADDR and MISC registers.
 */
static void mce_read_aux(struct mce *m, int i)
{
	if (m->status & MCI_STATUS_MISCV)
		m->misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
	if (m->status & MCI_STATUS_ADDRV) {
		m->addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));

		/*
		 * Mask the reported address by the reported granularity.
		 */
		if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
			u8 shift = MCI_MISC_ADDR_LSB(m->misc);
			m->addr >>= shift;
			m->addr <<= shift;
		}
	}
}
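/*
 * Worked example (illustrative): if MCi_MISC reports an address LSB of 12,
 * a raw address of 0x12345abc is masked down to 0x12345000 by the shift
 * pair above, i.e. to 4K page granularity.
 */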

DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: the spec recommends panicking for fatal unsignalled
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between the exception handler
 * and the poll handler -- so we skip this for now.
 * These cases should not happen anyway, or only when the CPU
 * is already totally confused. In this case it's likely it will
 * not fully execute the machine check handler either.
 */
void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	struct mce m;
	int i;

	this_cpu_inc(mce_poll_count);

	mce_gather_info(&m, NULL);

	for (i = 0; i < mca_cfg.banks; i++) {
		if (!mce_banks[i].ctl || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		barrier();
		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (!(m.status & MCI_STATUS_VAL))
			continue;

		/*
		 * Uncorrected or signalled events are handled by the exception
		 * handler when it is enabled, so don't process those here.
		 *
		 * TBD do the same check for MCI_STATUS_EN here?
		 */
		if (!(flags & MCP_UC) &&
		    (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
			continue;

		mce_read_aux(&m, i);

		if (!(flags & MCP_TIMESTAMP))
			m.tsc = 0;
		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */
		if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
			mce_log(&m);

		/*
		 * Clear state for this bank.
		 */
		mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */

	sync_core();
}
EXPORT_SYMBOL_GPL(machine_check_poll);
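/*
 * For illustration, the periodic timer below invokes this as:
 *
 *	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_poll_banks));
 */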

/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
			  struct pt_regs *regs)
{
	int i, ret = 0;

	for (i = 0; i < mca_cfg.banks; i++) {
		m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (m->status & MCI_STATUS_VAL) {
			__set_bit(i, validp);
			if (quirk_no_way_out)
				quirk_no_way_out(i, m, regs);
		}
		if (mce_severity(m, mca_cfg.tolerant, msg) >= MCE_PANIC_SEVERITY)
			ret = 1;
	}
	return ret;
}

/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until executing is equal its number.
 */
static atomic_t mce_executing;

/*
 * Defines order of CPUs on entry. First CPU becomes Monarch.
 */
static atomic_t mce_callin;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t)
{
	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that system_state
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_paniced))
		wait_for_panic();
	if (!mca_cfg.monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		/* CHECKME: Make panic default for 1 too? */
		if (mca_cfg.tolerant < 1)
			mce_panic("Timeout synchronizing machine check over CPUs",
				  NULL, NULL);
		cpu_missing = 1;
		return 1;
	}
	*t -= SPINUNIT;
out:
	touch_nmi_watchdog();
	return 0;
}

/*
 * The Monarch's reign.  The Monarch is the CPU who entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. When any
 * error is fatal panic. Only then let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable case
 * and also make sure all CPUs' errors are always examined.
 *
 * Also this detects the case of a machine check event coming from outer
 * space (not detected by any CPUs). In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a semi-stable
 * state and won't corrupt anything by itself. It's ok to let the others
 * continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;
	char *nmsg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		int severity = mce_severity(&per_cpu(mces_seen, cpu),
					    mca_cfg.tolerant,
					    &nmsg);
		if (severity > global_worst) {
			msg = nmsg;
			global_worst = severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal Machine check", m, msg);

	/*
	 * For UC somewhere we let the CPU who detects it handle it.
	 * Also must let continue the others, otherwise the handling
	 * CPU could deadlock on a lock.
	 */

	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}

static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires panic. Then it executes them
 * in the entry order.
 * TBD double check parallel CPU hotunplug
 */
static int mce_start(int *no_way_out)
{
	int order;
	int cpus = num_online_cpus();
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		return -1;

	atomic_add(*no_way_out, &global_nwo);
	/*
	 * global_nwo should be updated before mce_callin
	 */
	smp_wmb();
	order = atomic_inc_return(&mce_callin);

	/*
	 * Wait for everyone.
	 */
	while (atomic_read(&mce_callin) != cpus) {
		if (mce_timed_out(&timeout)) {
			atomic_set(&global_nwo, 0);
			return -1;
		}
		ndelay(SPINUNIT);
	}

	/*
	 * mce_callin should be read before global_nwo
	 */
	smp_rmb();

	if (order == 1) {
		/*
		 * Monarch: Starts executing now, the others wait.
		 */
		atomic_set(&mce_executing, 1);
	} else {
		/*
		 * Subject: Now start the scanning loop one by one in
		 * the original callin order.
		 * This way when there are any shared banks it will be
		 * only seen by one CPU before cleared, avoiding duplicates.
		 */
		while (atomic_read(&mce_executing) < order) {
			if (mce_timed_out(&timeout)) {
				atomic_set(&global_nwo, 0);
				return -1;
			}
			ndelay(SPINUNIT);
		}
	}

	/*
	 * Cache the global no_way_out state.
	 */
	*no_way_out = atomic_read(&global_nwo);

	return order;
}

/*
 * Synchronize between CPUs after main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static int mce_end(int order)
{
	int ret = -1;
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		goto reset;
	if (order < 0)
		goto reset;

	/*
	 * Allow others to run.
	 */
	atomic_inc(&mce_executing);

	if (order == 1) {
		/* CHECKME: Can this race with a parallel hotplug? */
		int cpus = num_online_cpus();

		/*
		 * Monarch: Wait for everyone to go through their scanning
		 * loops.
		 */
		while (atomic_read(&mce_executing) <= cpus) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		mce_reign();
		barrier();
		ret = 0;
	} else {
		/*
		 * Subject: Wait for Monarch to finish.
		 */
		while (atomic_read(&mce_executing) != 0) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		/*
		 * Don't reset anything. That's done by the Monarch.
		 */
		return 0;
	}

	/*
	 * Reset all global state.
	 */
reset:
	atomic_set(&global_nwo, 0);
	atomic_set(&mce_callin, 0);
	barrier();

	/*
	 * Let others run again.
	 */
	atomic_set(&mce_executing, 0);
	return ret;
}
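/*
 * Summary of the rendezvous (descriptive only): mce_start() spins until
 * mce_callin reaches num_online_cpus(), then lets CPUs scan their banks
 * one at a time in callin order, gated by mce_executing. mce_end() makes
 * the Monarch (order == 1) wait for all Subjects, run mce_reign(), and
 * finally release everyone by resetting the counters.
 */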

/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
static int mce_usable_address(struct mce *m)
{
	if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
		return 0;
	if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
		return 0;
	if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
		return 0;
	return 1;
}

static void mce_clear_state(unsigned long *toclear)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		if (test_bit(i, toclear))
			mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

/*
 * Need to save faulting physical address associated with a process
 * in the machine check handler some place where we can grab it back
 * later in mce_notify_process()
 */
#define	MCE_INFO_MAX	16

struct mce_info {
	atomic_t		inuse;
	struct task_struct	*t;
	__u64			paddr;
	int			restartable;
} mce_info[MCE_INFO_MAX];
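/*
 * Descriptive note: slots are claimed lock-free in mce_save_info() via
 * atomic_cmpxchg() on ->inuse and are found again in mce_notify_process()
 * by matching ->t against current in mce_find_info().
 */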

static void mce_save_info(__u64 addr, int c)
{
	struct mce_info *mi;

	for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++) {
		if (atomic_cmpxchg(&mi->inuse, 0, 1) == 0) {
			mi->t = current;
			mi->paddr = addr;
			mi->restartable = c;
			return;
		}
	}

	mce_panic("Too many concurrent recoverable errors", NULL, NULL);
}

static struct mce_info *mce_find_info(void)
{
	struct mce_info *mi;

	for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++)
		if (atomic_read(&mi->inuse) && mi->t == current)
			return mi;
	return NULL;
}

static void mce_clear_info(struct mce_info *mi)
{
	atomic_set(&mi->inuse, 0);
}

/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context not subject to normal locking rules. This
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 *
 * On Intel systems this is entered on all CPUs in parallel through
 * MCE broadcast. However some CPUs might be broken beyond repair,
 * so be always careful when synchronizing with others.
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
	struct mca_config *cfg = &mca_cfg;
	struct mce m, *final;
	int i;
	int worst = 0;
	int severity;
	/*
	 * Establish sequential order between the CPUs entering the machine
	 * check handler.
	 */
	int order;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE.  If mca_cfg.tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;
	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
	char *msg = "Unknown";

	atomic_inc(&mce_entry);

	this_cpu_inc(mce_exception_count);

	if (!cfg->banks)
		goto out;

	mce_gather_info(&m, regs);

	final = &__get_cpu_var(mces_seen);
	*final = m;

	memset(valid_banks, 0, sizeof(valid_banks));
	no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);

	barrier();

	/*
	 * When no restart IP might need to kill or panic.
	 * Assume the worst for now, but if we find the
	 * severity is MCE_AR_SEVERITY we have other options.
	 */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		kill_it = 1;

	/*
	 * Go through all the banks in exclusion of the other CPUs.
	 * This way we don't report duplicated events on shared banks
	 * because the first one to see it will clear it.
	 */
	order = mce_start(&no_way_out);
	for (i = 0; i < cfg->banks; i++) {
		__clear_bit(i, toclear);
		if (!test_bit(i, valid_banks))
			continue;
		if (!mce_banks[i].ctl)
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		/*
		 * Corrected or non-signalled errors are handled by
		 * machine_check_poll(). Leave them alone, unless this panics.
		 */
		if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
			!no_way_out)
			continue;

		/*
		 * Set taint even when machine check was not enabled.
		 */
		add_taint(TAINT_MACHINE_CHECK);

		severity = mce_severity(&m, cfg->tolerant, NULL);

		/*
		 * When the machine check was for a corrected error, don't
		 * touch it, unless we're panicking.
		 */
		if (severity == MCE_KEEP_SEVERITY && !no_way_out)
			continue;
		__set_bit(i, toclear);
		if (severity == MCE_NO_SEVERITY) {
			/*
			 * Machine check event was not enabled. Clear, but
			 * ignore.
			 */
			continue;
		}

		mce_read_aux(&m, i);

		/*
		 * Action optional error. Queue address for later processing.
		 * When the ring overflows we just ignore the AO error.
		 * RED-PEN add some logging mechanism when
		 * usable_address or mce_add_ring fails.
		 * RED-PEN don't ignore overflow for mca_cfg.tolerant == 0
		 */
		if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
			mce_ring_add(m.addr >> PAGE_SHIFT);

		mce_log(&m);

		if (severity > worst) {
			*final = m;
			worst = severity;
		}
	}

	/* mce_clear_state will clear *final, save locally for use later */
	m = *final;

	if (!no_way_out)
		mce_clear_state(toclear);

	/*
	 * Do most of the synchronization with other CPUs.
	 * When there's any problem use only local no_way_out state.
	 */
	if (mce_end(order) < 0)
		no_way_out = worst >= MCE_PANIC_SEVERITY;

	/*
	 * At insane "tolerant" levels we take no action. Otherwise
	 * we only die if we have no other choice. For less serious
	 * issues we try to recover, or limit damage to the current
	 * process.
	 */
	if (cfg->tolerant < 3) {
		if (no_way_out)
			mce_panic("Fatal machine check on current CPU", &m, msg);
		if (worst == MCE_AR_SEVERITY) {
			/* schedule action before return to userland */
			mce_save_info(m.addr, m.mcgstatus & MCG_STATUS_RIPV);
			set_thread_flag(TIF_MCE_NOTIFY);
		} else if (kill_it) {
			force_sig(SIGBUS, current);
		}
	}

	if (worst > 0)
		mce_report_event(regs);
	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
out:
	atomic_dec(&mce_entry);
	sync_core();
}
EXPORT_SYMBOL_GPL(do_machine_check);

#ifndef CONFIG_MEMORY_FAILURE
int memory_failure(unsigned long pfn, int vector, int flags)
{
	/* mce_severity() should not hand us an ACTION_REQUIRED error */
	BUG_ON(flags & MF_ACTION_REQUIRED);
	pr_err("Uncorrected memory error in page 0x%lx ignored\n"
	       "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
	       pfn);

	return 0;
}
#endif

/*
 * Called in process context that was interrupted by MCE and marked with
 * TIF_MCE_NOTIFY, just before returning to erroneous userland.
 * This code is allowed to sleep.
 * Attempt possible recovery such as calling the high level VM handler to
 * process any corrupted pages, and kill/signal current process if required.
 * Action required errors are handled here.
 */
void mce_notify_process(void)
{
	unsigned long pfn;
	struct mce_info *mi = mce_find_info();
	int flags = MF_ACTION_REQUIRED;

	if (!mi)
		mce_panic("Lost physical address for unconsumed uncorrectable error", NULL, NULL);
	pfn = mi->paddr >> PAGE_SHIFT;

	clear_thread_flag(TIF_MCE_NOTIFY);

	pr_err("Uncorrected hardware memory error in user-access at %llx",
		 mi->paddr);
	/*
	 * We must call memory_failure() here even if the current process is
	 * doomed. We still need to mark the page as poisoned and alert any
	 * other users of the page.
	 */
	if (!mi->restartable)
		flags |= MF_MUST_KILL;
	if (memory_failure(pfn, MCE_VECTOR, flags) < 0) {
		pr_err("Memory error not recovered");
		force_sig(SIGBUS, current);
	}
	mce_clear_info(mi);
}

/*
 * Action optional processing happens here (picking up
 * from the list of faulting pages that do_machine_check()
 * placed into the "ring").
 */
static void mce_process_work(struct work_struct *dummy)
{
	unsigned long pfn;

	while (mce_ring_get(&pfn))
		memory_failure(pfn, MCE_VECTOR, 0);
}

#ifdef CONFIG_X86_MCE_INTEL
/**
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(__u64 status)
{
	struct mce m;

	mce_setup(&m);
	m.bank = MCE_THERMAL_BANK;
	m.status = status;
	mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */

/*
 * Periodic polling timer for "silent" machine check errors.  If the
 * poller finds an MCE, poll 2x faster.  When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static unsigned long check_interval = 5 * 60; /* 5 minutes */

static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static unsigned long mce_adjust_timer_default(unsigned long interval)
{
	return interval;
}

static unsigned long (*mce_adjust_timer)(unsigned long interval) =
	mce_adjust_timer_default;

static void mce_timer_fn(unsigned long data)
{
	struct timer_list *t = &__get_cpu_var(mce_timer);
	unsigned long iv;

	WARN_ON(smp_processor_id() != data);

	if (mce_available(__this_cpu_ptr(&cpu_info))) {
		machine_check_poll(MCP_TIMESTAMP,
				&__get_cpu_var(mce_poll_banks));
		mce_intel_cmci_poll();
	}

	/*
	 * Alert userspace if needed.  If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
	 */
	iv = __this_cpu_read(mce_next_interval);
	if (mce_notify_irq()) {
		iv = max(iv / 2, (unsigned long) HZ/100);
	} else {
		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
		iv = mce_adjust_timer(iv);
	}
	__this_cpu_write(mce_next_interval, iv);
	/* Might have become 0 after CMCI storm subsided */
	if (iv) {
		t->expires = jiffies + iv;
		add_timer_on(t, smp_processor_id());
	}
}
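/*
 * Illustrative numbers, assuming HZ=1000: after a logged event the
 * interval halves down to no less than HZ/100 = 10ms; on quiet polls it
 * doubles again up to check_interval * HZ, i.e. the default 5 minutes.
 */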

/*
 * Ensure that the timer is firing in @interval from now.
 */
void mce_timer_kick(unsigned long interval)
{
	struct timer_list *t = &__get_cpu_var(mce_timer);
	unsigned long when = jiffies + interval;
	unsigned long iv = __this_cpu_read(mce_next_interval);

	if (timer_pending(t)) {
		if (time_before(when, t->expires))
			mod_timer_pinned(t, when);
	} else {
		t->expires = round_jiffies(when);
		add_timer_on(t, smp_processor_id());
	}
	if (interval < iv)
		__this_cpu_write(mce_next_interval, interval);
}

/* Must not be called in IRQ context where del_timer_sync() can deadlock */
static void mce_timer_delete_all(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		del_timer_sync(&per_cpu(mce_timer, cpu));
}

static void mce_do_trigger(struct work_struct *work)
{
	call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_irq(void)
{
	/* Not more than two messages every minute */
	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

	if (test_and_clear_bit(0, &mce_need_notify)) {
		/* wake processes polling /dev/mcelog */
		wake_up_interruptible(&mce_chrdev_wait);

		/*
		 * There is no risk of missing notifications because
		 * work_pending is always cleared before the function is
		 * executed.
		 */
		if (mce_helper[0] && !work_pending(&mce_trigger_work))
			schedule_work(&mce_trigger_work);

		if (__ratelimit(&ratelimit))
			pr_info(HW_ERR "Machine check events logged\n");

		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mce_notify_irq);

static int __cpuinit __mcheck_cpu_mce_banks_init(void)
{
	int i;
	u8 num_banks = mca_cfg.banks;

	mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
	if (!mce_banks)
		return -ENOMEM;

	for (i = 0; i < num_banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		b->ctl = -1ULL;
		b->init = 1;
	}
	return 0;
}
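/*
 * Descriptive note: ctl == -1ULL enables every error type in a bank; the
 * value is later written to MSR_IA32_MCx_CTL(i) by
 * __mcheck_cpu_init_generic().
 */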

/*
 * Initialize Machine Checks for a CPU.
 */
static int __cpuinit __mcheck_cpu_cap_init(void)
{
	unsigned b;
	u64 cap;

	rdmsrl(MSR_IA32_MCG_CAP, cap);

	b = cap & MCG_BANKCNT_MASK;
	if (!mca_cfg.banks)
		pr_info("CPU supports %d MCE banks\n", b);

	if (b > MAX_NR_BANKS) {
		pr_warn("Using only %u machine check banks out of %u\n",
			MAX_NR_BANKS, b);
		b = MAX_NR_BANKS;
	}

	/* Don't support asymmetric configurations today */
	WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
	mca_cfg.banks = b;

	if (!mce_banks) {
		int err = __mcheck_cpu_mce_banks_init();

		if (err)
			return err;
	}

	/* Use accurate RIP reporting if available. */
	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
		mca_cfg.rip_msr = MSR_IA32_MCG_EIP;

	if (cap & MCG_SER_P)
		mca_cfg.ser = true;

	return 0;
}

static void __mcheck_cpu_init_generic(void)
{
	enum mcp_flags m_fl = 0;
	mce_banks_t all_banks;
	u64 cap;
	int i;

	if (!mca_cfg.bootlog)
		m_fl = MCP_DONTLOG;

	/*
	 * Log the machine checks left over from the previous reset.
	 */
	bitmap_fill(all_banks, MAX_NR_BANKS);
	machine_check_poll(MCP_UC | m_fl, &all_banks);

	set_in_cr4(X86_CR4_MCE);

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (!b->init)
			continue;
		wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
		wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

/*
 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
 * Vol 3B Table 15-20). But this confuses both the code that determines
 * whether the machine check occurred in kernel or user mode, and also
 * the severity assessment code. Pretend that EIPV was set, and take the
 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
 */
static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
{
	if (bank != 0)
		return;
	if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
		return;
	if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
		          MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
			  MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
			  MCACOD)) !=
			 (MCI_STATUS_UC|MCI_STATUS_EN|
			  MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
			  MCI_STATUS_AR|MCACOD_INSTR))
		return;

	m->mcgstatus |= MCG_STATUS_EIPV;
	m->ip = regs->ip;
	m->cs = regs->cs;
}

/* Add per CPU specific workarounds here */
static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
	struct mca_config *cfg = &mca_cfg;

	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
		pr_info("unknown CPU type - not enabling MCE support\n");
		return -EOPNOTSUPP;
	}

	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15 && cfg->banks > 4) {
			/*
			 * disable GART TBL walk error reporting, which
			 * trips off incorrectly with the IOMMU & 3ware
			 * & Cerberus:
			 */
			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
		}
		if (c->x86 <= 17 && cfg->bootlog < 0) {
			/*
			 * Lots of broken BIOS around that don't clear them
			 * by default and leave crap in there. Don't log:
			 */
			cfg->bootlog = 0;
		}
		/*
		 * Various K7s with broken bank 0 around. Always disable
		 * by default.
		 */
		 if (c->x86 == 6 && cfg->banks > 0)
			mce_banks[0].ctl = 0;

		 /*
		  * Turn off MC4_MISC thresholding banks on those models since
		  * they're not supported there.
		  */
		 if (c->x86 == 0x15 &&
		     (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
			 int i;
			 u64 val, hwcr;
			 bool need_toggle;
			 u32 msrs[] = {
				0x00000413, /* MC4_MISC0 */
				0xc0000408, /* MC4_MISC1 */
			 };

			 rdmsrl(MSR_K7_HWCR, hwcr);

			 /* McStatusWrEn has to be set */
			 need_toggle = !(hwcr & BIT(18));

			 if (need_toggle)
				 wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));

			 for (i = 0; i < ARRAY_SIZE(msrs); i++) {
				 rdmsrl(msrs[i], val);

				 /* CntP bit set? */
				 if (val & BIT_64(62)) {
					val &= ~BIT_64(62);
					wrmsrl(msrs[i], val);
				 }
			 }

			 /* restore old settings */
			 if (need_toggle)
				 wrmsrl(MSR_K7_HWCR, hwcr);
		 }
	}

	if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * SDM documents that on family 6 bank 0 should not be written
		 * because it aliases to another special BIOS controlled
		 * register.
		 * But it's not aliased anymore on model 0x1a+
		 * Don't ignore bank 0 completely because there could be a
		 * valid event later, merely don't write CTL0.
		 */

		if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
			mce_banks[0].init = 0;

		/*
		 * All newer Intel systems support MCE broadcasting. Enable
		 * synchronization with a one second timeout.
		 */
		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
			cfg->monarch_timeout < 0)
			cfg->monarch_timeout = USEC_PER_SEC;

		/*
		 * There are also broken BIOSes on some Pentium M and
		 * earlier systems:
		 */
		if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
			cfg->bootlog = 0;

		if (c->x86 == 6 && c->x86_model == 45)
			quirk_no_way_out = quirk_sandybridge_ifu;
	}
	if (cfg->monarch_timeout < 0)
		cfg->monarch_timeout = 0;
	if (cfg->bootlog != 0)
		cfg->panic_timeout = 30;

	return 0;
}

static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
{
	if (c->x86 != 5)
		return 0;

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		intel_p5_mcheck_init(c);
		return 1;
		break;
	case X86_VENDOR_CENTAUR:
		winchip_mcheck_init(c);
		return 1;
		break;
	}

	return 0;
}

static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		mce_adjust_timer = mce_intel_adjust_timer;
		break;
	case X86_VENDOR_AMD:
		mce_amd_feature_init(c);
		break;
	default:
		break;
	}
}

static void mce_start_timer(unsigned int cpu, struct timer_list *t)
{
	unsigned long iv = mce_adjust_timer(check_interval * HZ);

	__this_cpu_write(mce_next_interval, iv);

	if (mca_cfg.ignore_ce || !iv)
		return;

	t->expires = round_jiffies(jiffies + iv);
	add_timer_on(t, smp_processor_id());
}

static void __mcheck_cpu_init_timer(void)
{
	struct timer_list *t = &__get_cpu_var(mce_timer);
	unsigned int cpu = smp_processor_id();

	setup_timer(t, mce_timer_fn, cpu);
	mce_start_timer(cpu, t);
}

/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
	pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
	       smp_processor_id());
}

/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
						unexpected_machine_check;

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return;

	if (__mcheck_cpu_ancient_init(c))
		return;

	if (!mce_available(c))
		return;

	if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
		mca_cfg.disabled = true;
		return;
	}

	machine_check_vector = do_machine_check;

	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(c);
	__mcheck_cpu_init_timer();
	INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
	init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
}

/*
 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
 */

static DEFINE_SPINLOCK(mce_chrdev_state_lock);
static int mce_chrdev_open_count;	/* #times opened */
static int mce_chrdev_open_exclu;	/* already open exclusive? */

static int mce_chrdev_open(struct inode *inode, struct file *file)
{
	spin_lock(&mce_chrdev_state_lock);

	if (mce_chrdev_open_exclu ||
	    (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_chrdev_state_lock);

		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		mce_chrdev_open_exclu = 1;
	mce_chrdev_open_count++;

	spin_unlock(&mce_chrdev_state_lock);

	return nonseekable_open(inode, file);
}

static int mce_chrdev_release(struct inode *inode, struct file *file)
{
	spin_lock(&mce_chrdev_state_lock);

	mce_chrdev_open_count--;
	mce_chrdev_open_exclu = 0;

	spin_unlock(&mce_chrdev_state_lock);

	return 0;
}

static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;

	rdtscll(cpu_tsc[smp_processor_id()]);
}

static int mce_apei_read_done;

/* Collect MCE record of previous boot in persistent storage via APEI ERST. */
static int __mce_read_apei(char __user **ubuf, size_t usize)
{
	int rc;
	u64 record_id;
	struct mce m;

	if (usize < sizeof(struct mce))
		return -EINVAL;

	rc = apei_read_mce(&m, &record_id);
	/* Error or no more MCE record */
	if (rc <= 0) {
		mce_apei_read_done = 1;
		/*
		 * When ERST is disabled, mce_chrdev_read() should return
		 * "no record" instead of "no device."
		 */
		if (rc == -ENODEV)
			return 0;
		return rc;
	}
	rc = -EFAULT;
	if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
		return rc;
	/*
	 * In fact, we should have cleared the record after that has
	 * been flushed to the disk or sent to network in
	 * /sbin/mcelog, but we have no interface to support that now,
	 * so just clear it to avoid duplication.
	 */
	rc = apei_clear_mce(record_id);
	if (rc) {
		mce_apei_read_done = 1;
		return rc;
	}
	*ubuf += sizeof(struct mce);

	return 0;
}

static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
				size_t usize, loff_t *off)
{
	char __user *buf = ubuf;
	unsigned long *cpu_tsc;
	unsigned prev, next;
	int i, err;

	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	mutex_lock(&mce_chrdev_read_mutex);

	if (!mce_apei_read_done) {
		err = __mce_read_apei(&buf, usize);
		if (err || buf != ubuf)
			goto out;
	}

	next = rcu_dereference_check_mce(mcelog.next);

	/* Only supports full reads right now */
	err = -EINVAL;
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
		goto out;

	err = 0;
	prev = 0;
	do {
		for (i = prev; i < next; i++) {
			unsigned long start = jiffies;
			struct mce *m = &mcelog.entry[i];

			while (!m->finished) {
				if (time_after_eq(jiffies, start + 2)) {
					memset(m, 0, sizeof(*m));
					goto timeout;
				}
				cpu_relax();
			}
			smp_rmb();
			err |= copy_to_user(buf, m, sizeof(*m));
			buf += sizeof(*m);
timeout:
			;
		}

		memset(mcelog.entry + prev, 0,
		       (next - prev) * sizeof(struct mce));
		prev = next;
		next = cmpxchg(&mcelog.next, prev, 0);
	} while (next != prev);

	synchronize_sched();

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1);

	for (i = next; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];

		if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
			err |= copy_to_user(buf, m, sizeof(*m));
			smp_rmb();
			buf += sizeof(*m);
			memset(m, 0, sizeof(*m));
L
Linus Torvalds 已提交
1857
		}
1858
	}
1859 1860 1861 1862 1863

	if (err)
		err = -EFAULT;

out:
1864
	mutex_unlock(&mce_chrdev_read_mutex);
1865
	kfree(cpu_tsc);
I
Ingo Molnar 已提交
1866

1867
	return err ? err : buf - ubuf;
L
Linus Torvalds 已提交
1868 1869
}
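
/*
 * Hypothetical userspace sketch (not part of this file): the read path
 * above only supports full reads, so a reader must request the whole
 * log in one call.
 *
 *	struct mce records[MCE_LOG_LEN];
 *	int fd = open("/dev/mcelog", O_RDONLY);
 *	ssize_t n = read(fd, records, sizeof(records));
 *	// n is a multiple of sizeof(struct mce); the returned records
 *	// have been cleared from the kernel-side buffer.
 *	close(fd);
 */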

static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_chrdev_wait, wait);
	if (rcu_access_index(mcelog.next))
		return POLLIN | POLLRDNORM;
	if (!mce_apei_read_done && apei_check_mce())
		return POLLIN | POLLRDNORM;
	return 0;
}
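
/*
 * Hypothetical userspace sketch: block until the log has something to
 * read, matching the poll semantics implemented above.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		n = read(fd, records, sizeof(records));
 */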

static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
				unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;

		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);

		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}
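
/*
 * Hypothetical userspace sketch: query the record and log sizes instead
 * of hardcoding them, so a reader keeps working if struct mce grows.
 *
 *	int rlen, llen;
 *	if (!ioctl(fd, MCE_GET_RECORD_LEN, &rlen) &&
 *	    !ioctl(fd, MCE_GET_LOG_LEN, &llen))
 *		buf = malloc((size_t)rlen * llen);
 */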

static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
			    size_t usize, loff_t *off);

void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
			     const char __user *ubuf,
			     size_t usize, loff_t *off))
{
	mce_write = fn;
}
EXPORT_SYMBOL_GPL(register_mce_write_callback);

ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
			 size_t usize, loff_t *off)
{
	if (mce_write)
		return mce_write(filp, ubuf, usize, off);
	else
		return -EINVAL;
}

static const struct file_operations mce_chrdev_ops = {
	.open			= mce_chrdev_open,
	.release		= mce_chrdev_release,
	.read			= mce_chrdev_read,
	.write			= mce_chrdev_write,
	.poll			= mce_chrdev_poll,
	.unlocked_ioctl		= mce_chrdev_ioctl,
	.llseek			= no_llseek,
};

static struct miscdevice mce_chrdev_device = {
	MISC_MCELOG_MINOR,
	"mcelog",
	&mce_chrdev_ops,
};

/*
 * mce=off Disables machine check
 * mce=no_cmci Disables CMCI
 * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
 *	monarchtimeout is how long to wait for other CPUs on machine
 *	check, or 0 to not wait
 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
 * mce=nobootlog Don't log MCEs from before booting.
 * mce=bios_cmci_threshold Don't program the CMCI threshold
 */
static int __init mcheck_enable(char *str)
{
	struct mca_config *cfg = &mca_cfg;

	if (*str == 0) {
		enable_p5_mce();
		return 1;
	}
	if (*str == '=')
		str++;
	if (!strcmp(str, "off"))
		cfg->disabled = true;
	else if (!strcmp(str, "no_cmci"))
		cfg->cmci_disabled = true;
	else if (!strcmp(str, "dont_log_ce"))
		cfg->dont_log_ce = true;
	else if (!strcmp(str, "ignore_ce"))
		cfg->ignore_ce = true;
	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		cfg->bootlog = (str[0] == 'b');
	else if (!strcmp(str, "bios_cmci_threshold"))
		cfg->bios_cmci_threshold = true;
	else if (isdigit(str[0])) {
		get_option(&str, &(cfg->tolerant));
		if (*str == ',') {
			++str;
			get_option(&str, &(cfg->monarch_timeout));
		}
	} else {
		pr_info("mce argument %s ignored. Please use /sys\n", str);
		return 0;
	}
	return 1;
}
__setup("mce", mcheck_enable);

int __init mcheck_init(void)
{
	mcheck_intel_therm_init();

	return 0;
}

/*
 * mce_syscore: PM support
 */

/*
 * Disable machine checks on suspend and shutdown. We can't really handle
 * them later.
 */
static int mce_disable_error_reporting(void)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
	}
	return 0;
}

static int mce_syscore_suspend(void)
{
	return mce_disable_error_reporting();
}

static void mce_syscore_shutdown(void)
{
	mce_disable_error_reporting();
}

/*
 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
 * Only one CPU is active at this time, the others get re-added later using
 * CPU hotplug:
 */
static void mce_syscore_resume(void)
{
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
}

static struct syscore_ops mce_syscore_ops = {
	.suspend	= mce_syscore_suspend,
	.shutdown	= mce_syscore_shutdown,
	.resume		= mce_syscore_resume,
};

/*
 * mce_device: Sysfs support
 */

static void mce_cpu_restart(void *data)
{
	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_timer();
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	mce_timer_delete_all();
	on_each_cpu(mce_cpu_restart, NULL, 1);
}

/* Toggle features for corrected errors */
static void mce_disable_cmci(void *data)
{
	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;
	cmci_clear();
}

static void mce_enable_ce(void *all)
{
	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;
	cmci_reenable();
	cmci_recheck();
	if (all)
		__mcheck_cpu_init_timer();
}

static struct bus_type mce_subsys = {
	.name		= "machinecheck",
	.dev_name	= "machinecheck",
};

DEFINE_PER_CPU(struct device *, mce_device);

__cpuinitdata
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);

static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
{
	return container_of(attr, struct mce_bank, attr);
}

static ssize_t show_bank(struct device *s, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
}

static ssize_t set_bank(struct device *s, struct device_attribute *attr,
			const char *buf, size_t size)
{
	u64 new;

	if (strict_strtoull(buf, 0, &new) < 0)
		return -EINVAL;

	attr_to_bank(attr)->ctl = new;
	mce_restart();

	return size;
}

static ssize_t
show_trigger(struct device *s, struct device_attribute *attr, char *buf)
{
	strcpy(buf, mce_helper);
	strcat(buf, "\n");
	return strlen(mce_helper) + 1;
}

static ssize_t set_trigger(struct device *s, struct device_attribute *attr,
				const char *buf, size_t siz)
{
	char *p;

	strncpy(mce_helper, buf, sizeof(mce_helper));
	mce_helper[sizeof(mce_helper)-1] = 0;
	p = strchr(mce_helper, '\n');

	if (p)
		*p = 0;

	return strlen(mce_helper) + !!p;
}
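
/*
 * Illustrative sysfs usage (helper path is hypothetical): point the
 * trigger at a program to run when a machine check event is logged.
 *
 *	echo /usr/local/sbin/mce-notify \
 *		> /sys/devices/system/machinecheck/machinecheck0/trigger
 */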

static ssize_t set_ignore_ce(struct device *s,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	u64 new;

	if (strict_strtoull(buf, 0, &new) < 0)
		return -EINVAL;

	if (mca_cfg.ignore_ce ^ !!new) {
		if (new) {
			/* disable ce features */
			mce_timer_delete_all();
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mca_cfg.ignore_ce = true;
		} else {
			/* enable ce features */
			mca_cfg.ignore_ce = false;
			on_each_cpu(mce_enable_ce, (void *)1, 1);
		}
	}
	return size;
}

static ssize_t set_cmci_disabled(struct device *s,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u64 new;

	if (strict_strtoull(buf, 0, &new) < 0)
		return -EINVAL;

	if (mca_cfg.cmci_disabled ^ !!new) {
		if (new) {
			/* disable cmci */
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mca_cfg.cmci_disabled = true;
		} else {
			/* enable cmci */
			mca_cfg.cmci_disabled = false;
			on_each_cpu(mce_enable_ce, NULL, 1);
		}
	}
	return size;
}

static ssize_t store_int_with_restart(struct device *s,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	ssize_t ret = device_store_int(s, attr, buf, size);
	mce_restart();
	return ret;
}

static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);

static struct dev_ext_attribute dev_attr_check_interval = {
	__ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
	&check_interval
};

static struct dev_ext_attribute dev_attr_ignore_ce = {
	__ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
	&mca_cfg.ignore_ce
};

static struct dev_ext_attribute dev_attr_cmci_disabled = {
	__ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
	&mca_cfg.cmci_disabled
};

static struct device_attribute *mce_device_attrs[] = {
	&dev_attr_tolerant.attr,
	&dev_attr_check_interval.attr,
	&dev_attr_trigger,
	&dev_attr_monarch_timeout.attr,
	&dev_attr_dont_log_ce.attr,
	&dev_attr_ignore_ce.attr,
	&dev_attr_cmci_disabled.attr,
	NULL
};
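
/*
 * Illustrative sysfs tuning (values are examples; the attributes appear
 * under /sys/devices/system/machinecheck/machinecheckN/ per CPU):
 *
 *	echo 60 > .../machinecheck0/check_interval	(poll interval)
 *	echo 1  > .../machinecheck0/ignore_ce		(stop CE handling)
 */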

static cpumask_var_t mce_device_initialized;

static void mce_device_release(struct device *dev)
{
	kfree(dev);
}

/* Per cpu device init. All of the cpus still share the same ctrl bank: */
static __cpuinit int mce_device_create(unsigned int cpu)
{
	struct device *dev;
	int err;
	int i, j;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	dev = kzalloc(sizeof *dev, GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->id  = cpu;
	dev->bus = &mce_subsys;
	dev->release = &mce_device_release;

	err = device_register(dev);
	if (err)
		return err;

	for (i = 0; mce_device_attrs[i]; i++) {
		err = device_create_file(dev, mce_device_attrs[i]);
		if (err)
			goto error;
	}
	for (j = 0; j < mca_cfg.banks; j++) {
		err = device_create_file(dev, &mce_banks[j].attr);
		if (err)
			goto error2;
	}
	cpumask_set_cpu(cpu, mce_device_initialized);
	per_cpu(mce_device, cpu) = dev;

	return 0;
error2:
	while (--j >= 0)
		device_remove_file(dev, &mce_banks[j].attr);
error:
	while (--i >= 0)
		device_remove_file(dev, mce_device_attrs[i]);

	device_unregister(dev);

	return err;
}

static __cpuinit void mce_device_remove(unsigned int cpu)
{
	struct device *dev = per_cpu(mce_device, cpu);
	int i;

	if (!cpumask_test_cpu(cpu, mce_device_initialized))
		return;

	for (i = 0; mce_device_attrs[i]; i++)
		device_remove_file(dev, mce_device_attrs[i]);

	for (i = 0; i < mca_cfg.banks; i++)
		device_remove_file(dev, &mce_banks[i].attr);

	device_unregister(dev);
	cpumask_clear_cpu(cpu, mce_device_initialized);
	per_cpu(mce_device, cpu) = NULL;
}

/* Make sure there are no machine checks on offlined CPUs. */
static void __cpuinit mce_disable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;
	int i;

	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;

	if (!(action & CPU_TASKS_FROZEN))
		cmci_clear();
	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
	}
}

static void __cpuinit mce_reenable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;
	int i;

	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;

	if (!(action & CPU_TASKS_FROZEN))
		cmci_reenable();
	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
	}
}

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int __cpuinit
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct timer_list *t = &per_cpu(mce_timer, cpu);

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		mce_device_create(cpu);
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		break;
	case CPU_DEAD:
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		mce_device_remove(cpu);
		mce_intel_hcpu_update(cpu);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
		del_timer_sync(t);
		break;
	case CPU_DOWN_FAILED:
		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
		mce_start_timer(cpu, t);
		break;
	}

	if (action == CPU_POST_DEAD) {
		/* intentionally ignoring frozen here */
		cmci_rediscover(cpu);
	}

	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier __cpuinitdata = {
	.notifier_call = mce_cpu_callback,
};

static __init void mce_init_banks(void)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];
		struct device_attribute *a = &b->attr;

		sysfs_attr_init(&a->attr);
		a->attr.name	= b->attrname;
		snprintf(b->attrname, ATTR_LEN, "bank%d", i);

		a->attr.mode	= 0644;
		a->show		= show_bank;
		a->store	= set_bank;
	}
}

static __init int mcheck_init_device(void)
{
	int err;
	int i = 0;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);

	mce_init_banks();

	err = subsys_system_register(&mce_subsys, NULL);
	if (err)
		return err;

	for_each_online_cpu(i) {
		err = mce_device_create(i);
		if (err)
			return err;
	}

	register_syscore_ops(&mce_syscore_ops);
	register_hotcpu_notifier(&mce_cpu_notifier);

	/* register character device /dev/mcelog */
	misc_register(&mce_chrdev_device);

	return err;
}
device_initcall_sync(mcheck_init_device);

/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
	mca_cfg.disabled = true;
	return 1;
}
__setup("nomce", mcheck_disable);

#ifdef CONFIG_DEBUG_FS
struct dentry *mce_get_debugfs_dir(void)
{
	static struct dentry *dmce;

	if (!dmce)
		dmce = debugfs_create_dir("mce", NULL);

	return dmce;
}

static void mce_reset(void)
{
	cpu_missing = 0;
	atomic_set(&mce_fake_paniced, 0);
	atomic_set(&mce_executing, 0);
	atomic_set(&mce_callin, 0);
	atomic_set(&global_nwo, 0);
}

static int fake_panic_get(void *data, u64 *val)
{
	*val = fake_panic;
	return 0;
}

static int fake_panic_set(void *data, u64 val)
{
	mce_reset();
	fake_panic = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
			fake_panic_set, "%llu\n");

static int __init mcheck_debugfs_init(void)
{
	struct dentry *dmce, *ffake_panic;

	dmce = mce_get_debugfs_dir();
	if (!dmce)
		return -ENOMEM;
	ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
					  &fake_panic_fops);
	if (!ffake_panic)
		return -ENOMEM;

	return 0;
}
late_initcall(mcheck_debugfs_init);
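
/*
 * Illustrative debugfs usage (assumes debugfs mounted at
 * /sys/kernel/debug): fake_panic is created read-only above, so just
 * inspect the current value.
 *
 *	cat /sys/kernel/debug/mce/fake_panic
 */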
#endif