/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>
#include <linux/jump_label.h>

#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/msr.h>

#include "mce-internal.h"

static DEFINE_MUTEX(mce_chrdev_read_mutex);

#define mce_log_get_idx_check(p) \
({ \
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
			 !lockdep_is_held(&mce_chrdev_read_mutex), \
			 "suspicious mce_log_get_idx_check() usage"); \
	smp_load_acquire(&(p)); \
})

#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

#define SPINUNIT		100	/* 100ns */

DEFINE_PER_CPU(unsigned, mce_exception_count);

struct mce_bank *mce_banks __read_mostly;
struct mce_vendor_flags mce_flags __read_mostly;

struct mca_config mca_cfg __read_mostly = {
	.bootlog  = -1,
	/*
	 * Tolerant levels:
	 * 0: always panic on uncorrected errors, log corrected errors
	 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
	 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
	 * 3: never panic or SIGBUS, log all errors (for testing only)
	 */
	.tolerant = 1,
	.monarch_timeout = -1
};

/* User mode helper program triggered by machine check event */
static unsigned long		mce_need_notify;
static char			mce_helper[128];
static char			*mce_helper_argv[2] = { mce_helper, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);

static DEFINE_PER_CPU(struct mce, mces_seen);
static int			cpu_missing;

/*
 * MCA banks polled by the periodic polling timer for corrected events.
 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
 */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

/*
 * MCA banks controlled through firmware first for corrected errors.
 * This is a global list of banks for which we won't enable CMCI and we
 * won't poll. Firmware controls these banks and is responsible for
 * reporting corrected errors through GHES. Uncorrected/recoverable
 * errors are still notified through a machine check.
 */
mce_banks_t mce_banks_ce_disabled;

static struct work_struct mce_work;
static struct irq_work mce_irq_work;

static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);

/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);

/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	m->tsc = rdtsc();
	/* We hope get_seconds stays lockless */
	m->time = get_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
	m->socketid = cpu_data(m->extcpu).phys_proc_id;
	m->apicid = cpu_data(m->extcpu).initial_apicid;
	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);

	if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
		rdmsrl(MSR_PPIN, m->ppin);
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. It
 * also separates MCEs from kernel messages to avoid bogus bug reports.
 */

static struct mce_log mcelog = {
	.signature	= MCE_LOG_SIGNATURE,
	.len		= MCE_LOG_LEN,
	.recordlen	= sizeof(struct mce),
};

void mce_log(struct mce *mce)
{
	unsigned next, entry;

	/* Emit the trace record: */
	trace_mce_record(mce);

	if (!mce_gen_pool_add(mce))
		irq_work_queue(&mce_irq_work);

	wmb();
	for (;;) {
		entry = mce_log_get_idx_check(mcelog.next);
		for (;;) {

			/*
			 * When the buffer fills up discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	set_bit(0, &mce_need_notify);
}

void mce_inject_log(struct mce *m)
{
	mutex_lock(&mce_chrdev_read_mutex);
	mce_log(m);
	mutex_unlock(&mce_chrdev_read_mutex);
}
EXPORT_SYMBOL_GPL(mce_inject_log);

static struct notifier_block mce_srao_nb;

static atomic_t num_notifiers;

void mce_register_decode_chain(struct notifier_block *nb)
{
	atomic_inc(&num_notifiers);

	/* Ensure SRAO notifier has the highest priority in the decode chain. */
	if (nb != &mce_srao_nb && nb->priority == INT_MAX)
		nb->priority -= 1;

	atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_register_decode_chain);

void mce_unregister_decode_chain(struct notifier_block *nb)
{
	atomic_dec(&num_notifiers);

	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
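
/*
 * Usage sketch (illustrative only; the callback and variable names
 * below are hypothetical): an EDAC/RAS driver hooks the chain with a
 * standard atomic-notifier callback and can return NOTIFY_STOP to
 * suppress the default "run mcelog --ascii" hint in print_mce():
 *
 *	static int my_mce_decode(struct notifier_block *nb,
 *				 unsigned long val, void *data)
 *	{
 *		struct mce *m = data;
 *
 *		// decode and pretty-print *m here
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_mce_nb = {
 *		.notifier_call = my_mce_decode,
 *	};
 *
 *	mce_register_decode_chain(&my_mce_nb);
 */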

static inline u32 ctl_reg(int bank)
{
	return MSR_IA32_MCx_CTL(bank);
}

static inline u32 status_reg(int bank)
{
	return MSR_IA32_MCx_STATUS(bank);
}

static inline u32 addr_reg(int bank)
{
	return MSR_IA32_MCx_ADDR(bank);
}

static inline u32 misc_reg(int bank)
{
	return MSR_IA32_MCx_MISC(bank);
}

static inline u32 smca_ctl_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_CTL(bank);
}

static inline u32 smca_status_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_STATUS(bank);
}

static inline u32 smca_addr_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_ADDR(bank);
}

static inline u32 smca_misc_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_MISC(bank);
}
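
/*
 * Default to the legacy MCA register layout. On Scalable MCA systems
 * __mcheck_cpu_init_vendor() below swaps these pointers for the
 * smca_*_reg() variants.
 */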

struct mca_msr_regs msr_ops = {
	.ctl	= ctl_reg,
	.status	= status_reg,
	.addr	= addr_reg,
	.misc	= misc_reg
};

static void __print_mce(struct mce *m)
{
	pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
		 m->extcpu,
		 (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""),
		 m->mcgstatus, m->bank, m->status);

	if (m->ip) {
		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
			m->cs, m->ip);

		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		pr_cont("\n");
	}

	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
	if (m->addr)
		pr_cont("ADDR %llx ", m->addr);
	if (m->misc)
		pr_cont("MISC %llx ", m->misc);

	if (mce_flags.smca) {
		if (m->synd)
			pr_cont("SYND %llx ", m->synd);
		if (m->ipid)
			pr_cont("IPID %llx ", m->ipid);
	}

	pr_cont("\n");
	/*
	 * Note this output is parsed by external tools and old fields
	 * should not be changed.
	 */
	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
		cpu_data(m->extcpu).microcode);
}

static void print_mce(struct mce *m)
{
	int ret = 0;

	__print_mce(m);

	/*
	 * Print out human-readable details about the MCE error,
	 * (if the CPU has an implementation for that)
	 */
	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
	if (ret == NOTIFY_STOP)
		return;

	pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}

#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_panicked;

static int fake_panic;
static atomic_t mce_fake_panicked;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

	preempt_disable();
	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	if (panic_timeout == 0)
		panic_timeout = mca_cfg.panic_timeout;
	panic("Panicking machine check CPU died");
}

static void mce_panic(const char *msg, struct mce *final, char *exp)
{
	int apei_err = 0;
	struct llist_node *pending;
	struct mce_evt_llist *l;

	if (!fake_panic) {
		/*
		 * Make sure only one CPU runs in machine check panic
		 */
		if (atomic_inc_return(&mce_panicked) > 1)
			wait_for_panic();
		barrier();

		bust_spinlocks(1);
		console_verbose();
	} else {
		/* Don't log too much for fake panic */
		if (atomic_inc_return(&mce_fake_panicked) > 1)
			return;
	}
	pending = mce_gen_pool_prepare_records();
	/* First print corrected ones that are still unlogged */
	llist_for_each_entry(l, pending, llnode) {
		struct mce *m = &l->mce;
		if (!(m->status & MCI_STATUS_UC)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	/* Now print uncorrected but with the final one last */
	llist_for_each_entry(l, pending, llnode) {
		struct mce *m = &l->mce;
		if (!(m->status & MCI_STATUS_UC))
			continue;
		if (!final || mce_cmp(m, final)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	if (final) {
		print_mce(final);
		if (!apei_err)
			apei_err = apei_write_mce(final);
	}
	if (cpu_missing)
		pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
	if (exp)
		pr_emerg(HW_ERR "Machine check: %s\n", exp);
	if (!fake_panic) {
		if (panic_timeout == 0)
			panic_timeout = mca_cfg.panic_timeout;
		panic(msg);
	} else
		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
}

/* Support code for software error injection */

static int msr_to_offset(u32 msr)
{
	unsigned bank = __this_cpu_read(injectm.bank);

	if (msr == mca_cfg.rip_msr)
		return offsetof(struct mce, ip);
	if (msr == msr_ops.status(bank))
		return offsetof(struct mce, status);
	if (msr == msr_ops.addr(bank))
		return offsetof(struct mce, addr);
	if (msr == msr_ops.misc(bank))
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}

/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
	u64 v;

	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset < 0)
			return 0;
		return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
	}

	if (rdmsrl_safe(msr, &v)) {
		WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
		/*
		 * Return zero in case the access faulted. This should
		 * not happen normally but can happen if the CPU does
		 * something weird, or if the code is buggy.
		 */
		v = 0;
	}

	return v;
}

static void mce_wrmsrl(u32 msr, u64 v)
{
	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset >= 0)
			*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
		return;
	}
	wrmsrl(msr, v);
}

/*
 * Collect all global (w.r.t. this processor) status about this machine
 * check into our "mce" struct so that we can use it later to assess
 * the severity of the problem as we read per-bank specific details.
 */
static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
	mce_setup(m);

	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	if (regs) {
		/*
		 * Get the address of the instruction at the time of
		 * the machine check error.
		 */
		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
			m->ip = regs->ip;
			m->cs = regs->cs;

			/*
			 * When in VM86 mode make the cs look like ring 3
			 * always. This is a lie, but it's better than passing
			 * the additional vm86 bit around everywhere.
			 */
			if (v8086_mode(regs))
				m->cs |= 3;
		}
		/* Use accurate RIP reporting if available. */
		if (mca_cfg.rip_msr)
			m->ip = mce_rdmsrl(mca_cfg.rip_msr);
	}
}

int mce_available(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
	if (!mce_gen_pool_empty())
		schedule_work(&mce_work);
}

static void mce_irq_work_cb(struct irq_work *entry)
{
	mce_notify_irq();
	mce_schedule_work();
}

static void mce_report_event(struct pt_regs *regs)
{
	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
		mce_notify_irq();
		/*
		 * Triggering the work queue here is just an insurance
		 * policy in case the syscall exit notify handler
		 * doesn't run soon enough or ends up running on the
		 * wrong CPU (can happen when audit sleeps)
		 */
		mce_schedule_work();
		return;
	}

	irq_work_queue(&mce_irq_work);
}

/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
static int mce_usable_address(struct mce *m)
{
	if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
		return 0;

	/* Checks after this one are Intel-specific: */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 1;

	if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
		return 0;
	if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
		return 0;
	return 1;
}
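
/*
 * Worked example (illustrative): MCI_MISC_ADDR_LSB(m->misc) == 6 with
 * physical address mode means bits [5:0] of m->addr are invalid, i.e.
 * the error is resolved to a 64-byte cache line. An LSB greater than
 * PAGE_SHIFT would locate the error only to something coarser than a
 * page, which is useless for page-granular poisoning, hence the check.
 */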

static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct mce *mce = (struct mce *)data;
	unsigned long pfn;

	if (!mce)
		return NOTIFY_DONE;

	if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
		pfn = mce->addr >> PAGE_SHIFT;
		memory_failure(pfn, MCE_VECTOR, 0);
	}

	return NOTIFY_OK;
}
static struct notifier_block mce_srao_nb = {
	.notifier_call	= srao_decode_notifier,
	.priority = INT_MAX,
};

static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct mce *m = (struct mce *)data;

	if (!m)
		return NOTIFY_DONE;

	/*
	 * Run the default notifier if we have only the SRAO
	 * notifier and us registered.
	 */
	if (atomic_read(&num_notifiers) > 2)
		return NOTIFY_DONE;

	__print_mce(m);

	return NOTIFY_DONE;
}

static struct notifier_block mce_default_nb = {
	.notifier_call	= mce_default_notifier,
	/* lowest prio, we want it to run last. */
	.priority	= 0,
};

/*
 * Read ADDR and MISC registers.
 */
static void mce_read_aux(struct mce *m, int i)
{
	if (m->status & MCI_STATUS_MISCV)
		m->misc = mce_rdmsrl(msr_ops.misc(i));

	if (m->status & MCI_STATUS_ADDRV) {
		m->addr = mce_rdmsrl(msr_ops.addr(i));

		/*
		 * Mask the reported address by the reported granularity.
		 */
		if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
			u8 shift = MCI_MISC_ADDR_LSB(m->misc);
			m->addr >>= shift;
			m->addr <<= shift;
		}

		/*
		 * Extract [55:<lsb>] where lsb is the least significant
		 * *valid* bit of the address bits.
		 */
		if (mce_flags.smca) {
			u8 lsb = (m->addr >> 56) & 0x3f;

			m->addr &= GENMASK_ULL(55, lsb);
		}
	}

	if (mce_flags.smca) {
		m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i));

		if (m->status & MCI_STATUS_SYNDV)
			m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i));
	}
}

static bool memory_error(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor == X86_VENDOR_AMD) {
		/* ErrCodeExt[20:16] */
		u8 xec = (m->status >> 16) & 0x1f;

		return (xec == 0x0 || xec == 0x8);
	} else if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
		 *
		 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
		 * indicating a memory error. Bit 8 is used for indicating a
		 * cache hierarchy error. The combination of bit 2 and bit 3
		 * is used for indicating a `generic' cache hierarchy error.
		 * But we can't just blindly check the above bits, because if
		 * bit 11 is set, then it is a bus/interconnect error - and
		 * either way the above bits just give more detail on what
		 * bus/interconnect error happened. Note that bit 12 can be
		 * ignored, as it's the "filter" bit.
		 */
		return (m->status & 0xef80) == BIT(7) ||
		       (m->status & 0xef00) == BIT(8) ||
		       (m->status & 0xeffc) == 0xc;
	}

	return false;
}
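
/*
 * Worked example (illustrative): an MCACOD of 0x009f satisfies
 * (status & 0xef80) == BIT(7) above: bit 7 is set, bits 8-11 are
 * clear, and bit 12 is masked out as the filter bit, so the event is
 * classified as a memory error.
 */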

DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: the spec recommends panicking for fatal unsignalled
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between exception handler
 * and poll handler -- so we skip this for now.
 * These cases should not happen anyway, or only when the CPU
 * is already totally confused. In this case it's likely it will
 * not fully execute the machine check handler either.
 */
bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	bool error_seen = false;
	struct mce m;
	int severity;
	int i;

	this_cpu_inc(mce_poll_count);

	mce_gather_info(&m, NULL);

	/*
	 * m.tsc was set in mce_setup(). Clear it if not requested.
	 *
	 * FIXME: Propagate @flags to mce_gather_info/mce_setup() to avoid
	 *	  that dance.
	 */
	if (!(flags & MCP_TIMESTAMP))
		m.tsc = 0;

	for (i = 0; i < mca_cfg.banks; i++) {
		if (!mce_banks[i].ctl || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		barrier();
		m.status = mce_rdmsrl(msr_ops.status(i));
		if (!(m.status & MCI_STATUS_VAL))
			continue;

		/*
		 * Uncorrected or signalled events are handled by the exception
		 * handler when it is enabled, so don't process those here.
		 *
		 * TBD do the same check for MCI_STATUS_EN here?
		 */
		if (!(flags & MCP_UC) &&
		    (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
			continue;

		error_seen = true;

		mce_read_aux(&m, i);

		severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);

		if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m))
			if (m.status & MCI_STATUS_ADDRV)
				m.severity = severity;

		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */
		if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
			mce_log(&m);
		else if (mce_usable_address(&m)) {
			/*
			 * Although we skipped logging this, we still want
			 * to take action. Add to the pool so the registered
			 * notifiers will see it.
			 */
			if (!mce_gen_pool_add(&m))
				mce_schedule_work();
		}

		/*
		 * Clear state for this bank.
		 */
		mce_wrmsrl(msr_ops.status(i), 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */

	sync_core();

	return error_seen;
}
EXPORT_SYMBOL_GPL(machine_check_poll);

/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
			  struct pt_regs *regs)
{
	int i, ret = 0;
	char *tmp;

	for (i = 0; i < mca_cfg.banks; i++) {
		m->status = mce_rdmsrl(msr_ops.status(i));
		if (m->status & MCI_STATUS_VAL) {
			__set_bit(i, validp);
			if (quirk_no_way_out)
				quirk_no_way_out(i, m, regs);
		}

		if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
			*msg = tmp;
			ret = 1;
		}
	}
	return ret;
}

/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until executing equals its number.
 */
static atomic_t mce_executing;

/*
 * Defines order of CPUs on entry. First CPU becomes Monarch.
 */
static atomic_t mce_callin;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t, const char *msg)
{
	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that system_state
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_panicked))
		wait_for_panic();
	if (!mca_cfg.monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		if (mca_cfg.tolerant <= 1)
			mce_panic(msg, NULL, NULL);
		cpu_missing = 1;
		return 1;
	}
	*t -= SPINUNIT;
out:
	touch_nmi_watchdog();
	return 0;
}

/*
 * The Monarch's reign.  The Monarch is the CPU who entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. When any
 * error is fatal panic. Only then let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable case
 * and also make sure all CPUs' errors are always examined.
 *
 * Also this detects the case of a machine check event coming from outer
 * space (not detected by any CPUs). In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a semi-stable
 * state and won't corrupt anything by itself. It's ok to let the others
 * continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;
	char *nmsg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		int severity = mce_severity(&per_cpu(mces_seen, cpu),
					    mca_cfg.tolerant,
					    &nmsg, true);
		if (severity > global_worst) {
			msg = nmsg;
			global_worst = severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal machine check", m, msg);

	/*
	 * For UC somewhere we let the CPU who detects it handle it.
	 * Also must let the others continue, otherwise the handling
	 * CPU could deadlock on a lock.
	 */

	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}

static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires panic. Then it executes them
 * in the entry order.
 * TBD double check parallel CPU hotunplug
 */
static int mce_start(int *no_way_out)
{
	int order;
	int cpus = num_online_cpus();
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		return -1;

	atomic_add(*no_way_out, &global_nwo);
	/*
	 * Rely on the implied barrier below, such that global_nwo
	 * is updated before mce_callin.
	 */
	order = atomic_inc_return(&mce_callin);

	/*
	 * Wait for everyone.
	 */
	while (atomic_read(&mce_callin) != cpus) {
		if (mce_timed_out(&timeout,
				  "Timeout: Not all CPUs entered broadcast exception handler")) {
			atomic_set(&global_nwo, 0);
			return -1;
		}
		ndelay(SPINUNIT);
	}

	/*
	 * mce_callin should be read before global_nwo
	 */
	smp_rmb();

	if (order == 1) {
		/*
		 * Monarch: Starts executing now, the others wait.
		 */
		atomic_set(&mce_executing, 1);
	} else {
		/*
		 * Subject: Now start the scanning loop one by one in
		 * the original callin order.
		 * This way when there are any shared banks it will be
		 * only seen by one CPU before cleared, avoiding duplicates.
		 */
		while (atomic_read(&mce_executing) < order) {
			if (mce_timed_out(&timeout,
					  "Timeout: Subject CPUs unable to finish machine check processing")) {
				atomic_set(&global_nwo, 0);
				return -1;
			}
			ndelay(SPINUNIT);
		}
	}

	/*
	 * Cache the global no_way_out state.
	 */
	*no_way_out = atomic_read(&global_nwo);

	return order;
}

/*
 * Synchronize between CPUs after main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static int mce_end(int order)
{
	int ret = -1;
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		goto reset;
	if (order < 0)
		goto reset;

	/*
	 * Allow others to run.
	 */
	atomic_inc(&mce_executing);

	if (order == 1) {
		/* CHECKME: Can this race with a parallel hotplug? */
		int cpus = num_online_cpus();

		/*
		 * Monarch: Wait for everyone to go through their scanning
		 * loops.
		 */
		while (atomic_read(&mce_executing) <= cpus) {
			if (mce_timed_out(&timeout,
					  "Timeout: Monarch CPU unable to finish machine check processing"))
				goto reset;
			ndelay(SPINUNIT);
		}

		mce_reign();
		barrier();
		ret = 0;
	} else {
		/*
		 * Subject: Wait for Monarch to finish.
		 */
		while (atomic_read(&mce_executing) != 0) {
			if (mce_timed_out(&timeout,
					  "Timeout: Monarch CPU did not finish machine check processing"))
				goto reset;
			ndelay(SPINUNIT);
		}

		/*
		 * Don't reset anything. That's done by the Monarch.
		 */
		return 0;
	}

	/*
	 * Reset all global state.
	 */
reset:
	atomic_set(&global_nwo, 0);
	atomic_set(&mce_callin, 0);
	barrier();

	/*
	 * Let others run again.
	 */
	atomic_set(&mce_executing, 0);
	return ret;
}

static void mce_clear_state(unsigned long *toclear)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		if (test_bit(i, toclear))
			mce_wrmsrl(msr_ops.status(i), 0);
	}
}

static int do_memory_failure(struct mce *m)
{
	int flags = MF_ACTION_REQUIRED;
	int ret;

	pr_err("Uncorrected hardware memory error in user-access at %llx", m->addr);
	if (!(m->mcgstatus & MCG_STATUS_RIPV))
		flags |= MF_MUST_KILL;
	ret = memory_failure(m->addr >> PAGE_SHIFT, MCE_VECTOR, flags);
	if (ret)
		pr_err("Memory error not recovered");
	return ret;
}

/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context not subject to normal locking rules. This
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 *
 * On Intel systems this is entered on all CPUs in parallel through
 * MCE broadcast. However some CPUs might be broken beyond repair,
 * so be always careful when synchronizing with others.
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
	struct mca_config *cfg = &mca_cfg;
	struct mce m, *final;
	int i;
	int worst = 0;
	int severity;

	/*
	 * Establish sequential order between the CPUs entering the machine
	 * check handler.
	 */
	int order = -1;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE.  If mca_cfg.tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;
	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
	char *msg = "Unknown";

	/*
	 * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
	 * on Intel.
	 */
	int lmce = 1;

	/* If this CPU is offline, just bail out. */
	if (cpu_is_offline(smp_processor_id())) {
		u64 mcgstatus;

		mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
		if (mcgstatus & MCG_STATUS_RIPV) {
			mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
			return;
		}
	}

	ist_enter(regs);

	this_cpu_inc(mce_exception_count);

	if (!cfg->banks)
		goto out;

	mce_gather_info(&m, regs);

	final = this_cpu_ptr(&mces_seen);
	*final = m;

	memset(valid_banks, 0, sizeof(valid_banks));
	no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);

	barrier();

	/*
	 * Without a restart IP we might need to kill the process or panic.
	 * Assume the worst for now, but if we find the
	 * severity is MCE_AR_SEVERITY we have other options.
	 */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		kill_it = 1;

	/*
	 * Check if this MCE is signaled to only this logical processor,
	 * on Intel only.
	 */
	if (m.cpuvendor == X86_VENDOR_INTEL)
		lmce = m.mcgstatus & MCG_STATUS_LMCES;

	/*
	 * Go through all banks in exclusion of the other CPUs. This way we
	 * don't report duplicated events on shared banks because the first one
	 * to see it will clear it. If this is a Local MCE, then no need to
	 * perform rendezvous.
	 */
	if (!lmce)
		order = mce_start(&no_way_out);

	for (i = 0; i < cfg->banks; i++) {
		__clear_bit(i, toclear);
		if (!test_bit(i, valid_banks))
			continue;
		if (!mce_banks[i].ctl)
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		m.status = mce_rdmsrl(msr_ops.status(i));
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		/*
		 * Errors that are neither uncorrected nor signalled are
		 * handled by machine_check_poll. Leave them alone, unless
		 * this panics.
		 */
		if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
			!no_way_out)
			continue;

		/*
		 * Set taint even when machine check was not enabled.
		 */
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

		severity = mce_severity(&m, cfg->tolerant, NULL, true);

		/*
		 * When the machine check was for a corrected/deferred event,
		 * don't touch it, unless we're panicking.
		 */
		if ((severity == MCE_KEEP_SEVERITY ||
		     severity == MCE_UCNA_SEVERITY) && !no_way_out)
			continue;
		__set_bit(i, toclear);
		if (severity == MCE_NO_SEVERITY) {
			/*
			 * Machine check event was not enabled. Clear, but
			 * ignore.
			 */
			continue;
		}

		mce_read_aux(&m, i);

		/* assuming valid severity level != 0 */
		m.severity = severity;

		mce_log(&m);

		if (severity > worst) {
			*final = m;
			worst = severity;
		}
	}

	/* mce_clear_state will clear *final, save locally for use later */
	m = *final;

	if (!no_way_out)
		mce_clear_state(toclear);

	/*
	 * Do most of the synchronization with other CPUs.
	 * When there's any problem use only local no_way_out state.
	 */
	if (!lmce) {
		if (mce_end(order) < 0)
			no_way_out = worst >= MCE_PANIC_SEVERITY;
	} else {
		/*
		 * Local MCE skipped calling mce_reign()
		 * If we found a fatal error, we need to panic here.
		 */
		 if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
			mce_panic("Machine check from unknown source",
				NULL, NULL);
	}

	/*
	 * If tolerant is at an insane level we drop requests to kill
	 * processes and continue even when there is no way out.
	 */
	if (cfg->tolerant == 3)
		kill_it = 0;
	else if (no_way_out)
		mce_panic("Fatal machine check on current CPU", &m, msg);

	if (worst > 0)
		mce_report_event(regs);
	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
out:
	sync_core();

	if (worst != MCE_AR_SEVERITY && !kill_it)
		goto out_ist;

	/* Fault was in user mode and we need to take some action */
	if ((m.cs & 3) == 3) {
		ist_begin_non_atomic(regs);
		local_irq_enable();

		if (kill_it || do_memory_failure(&m))
			force_sig(SIGBUS, current);
		local_irq_disable();
		ist_end_non_atomic();
	} else {
		if (!fixup_exception(regs, X86_TRAP_MC))
			mce_panic("Failed kernel mode recovery", &m, NULL);
	}

out_ist:
	ist_exit(regs);
}
EXPORT_SYMBOL_GPL(do_machine_check);
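
/*
 * Recovery paths in summary (informal): an action-required fault that
 * hit user mode lands in do_memory_failure() above, which tries to
 * poison the page, with SIGBUS as the fallback; a kernel-mode fault
 * must have an exception-table fixup registered for X86_TRAP_MC or
 * the machine panics.
 */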

#ifndef CONFIG_MEMORY_FAILURE
int memory_failure(unsigned long pfn, int vector, int flags)
{
	/* mce_severity() should not hand us an ACTION_REQUIRED error */
	BUG_ON(flags & MF_ACTION_REQUIRED);
	pr_err("Uncorrected memory error in page 0x%lx ignored\n"
	       "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
	       pfn);

	return 0;
}
#endif

/*
 * Action optional processing happens here (picking up
 * from the list of faulting pages that do_machine_check()
 * placed into the genpool).
 */
static void mce_process_work(struct work_struct *dummy)
{
	mce_gen_pool_process();
}

/*
 * Periodic polling timer for "silent" machine check errors.  If the
 * poller finds an MCE, poll 2x faster.  When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static unsigned long check_interval = INITIAL_CHECK_INTERVAL;

static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static unsigned long mce_adjust_timer_default(unsigned long interval)
{
	return interval;
}

static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;

static void __restart_timer(struct timer_list *t, unsigned long interval)
{
	unsigned long when = jiffies + interval;
	unsigned long flags;

	local_irq_save(flags);

	if (timer_pending(t)) {
		if (time_before(when, t->expires))
			mod_timer(t, when);
	} else {
		t->expires = round_jiffies(when);
		add_timer_on(t, smp_processor_id());
	}

	local_irq_restore(flags);
}

static void mce_timer_fn(unsigned long data)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	int cpu = smp_processor_id();
	unsigned long iv;

	WARN_ON(cpu != data);

	iv = __this_cpu_read(mce_next_interval);

	if (mce_available(this_cpu_ptr(&cpu_info))) {
		machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));

		if (mce_intel_cmci_poll()) {
			iv = mce_adjust_timer(iv);
			goto done;
		}
	}

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the polling
	 * interval, otherwise increase the polling interval.
	 */
	if (mce_notify_irq())
		iv = max(iv / 2, (unsigned long) HZ/100);
	else
		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));

done:
	__this_cpu_write(mce_next_interval, iv);
	__restart_timer(t, iv);
}
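
/*
 * Worked example (illustrative): with check_interval at its default,
 * one logged event halves the next poll interval, bounded below by
 * HZ/100 (10ms at HZ=1000); every quiet poll doubles it again, capped
 * at check_interval seconds.
 */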

/*
 * Ensure that the timer is firing in @interval from now.
 */
void mce_timer_kick(unsigned long interval)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	unsigned long iv = __this_cpu_read(mce_next_interval);

	__restart_timer(t, interval);

	if (interval < iv)
		__this_cpu_write(mce_next_interval, interval);
}

/* Must not be called in IRQ context where del_timer_sync() can deadlock */
static void mce_timer_delete_all(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		del_timer_sync(&per_cpu(mce_timer, cpu));
}

static void mce_do_trigger(struct work_struct *work)
{
	call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
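
/*
 * mce_helper holds the path of an optional usermode trigger program;
 * it is populated elsewhere in this file (via the mce device's
 * "trigger" sysfs attribute) and run with UMH_NO_WAIT above.
 */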

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_irq(void)
{
	/* Not more than two messages every minute */
	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

	if (test_and_clear_bit(0, &mce_need_notify)) {
		/* wake processes polling /dev/mcelog */
		wake_up_interruptible(&mce_chrdev_wait);

		if (mce_helper[0])
			schedule_work(&mce_trigger_work);

		if (__ratelimit(&ratelimit))
			pr_info(HW_ERR "Machine check events logged\n");

		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mce_notify_irq);

static int __mcheck_cpu_mce_banks_init(void)
{
	int i;
	u8 num_banks = mca_cfg.banks;

	mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
	if (!mce_banks)
		return -ENOMEM;

	for (i = 0; i < num_banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		b->ctl = -1ULL;
		b->init = 1;
	}
	return 0;
}

/*
 * Initialize Machine Checks for a CPU.
 */
static int __mcheck_cpu_cap_init(void)
{
	unsigned b;
	u64 cap;

	rdmsrl(MSR_IA32_MCG_CAP, cap);

	b = cap & MCG_BANKCNT_MASK;
	if (!mca_cfg.banks)
		pr_info("CPU supports %d MCE banks\n", b);

	if (b > MAX_NR_BANKS) {
		pr_warn("Using only %u machine check banks out of %u\n",
			MAX_NR_BANKS, b);
		b = MAX_NR_BANKS;
	}

	/* Don't support asymmetric configurations today */
	WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
	mca_cfg.banks = b;

	if (!mce_banks) {
		int err = __mcheck_cpu_mce_banks_init();

		if (err)
			return err;
	}

	/* Use accurate RIP reporting if available. */
	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
		mca_cfg.rip_msr = MSR_IA32_MCG_EIP;

	if (cap & MCG_SER_P)
		mca_cfg.ser = true;

	return 0;
}
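
/*
 * Worked example (illustrative): cap == 0x1000c14 would decode as
 * 0x14 = 20 banks in MCG_BANKCNT_MASK, CMCI and threshold-based error
 * status supported (bits 10/11), and MCG_SER_P (bit 24) set, so
 * mca_cfg.ser gets enabled above.
 */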

static void __mcheck_cpu_init_generic(void)
{
	enum mcp_flags m_fl = 0;
	mce_banks_t all_banks;
	u64 cap;

	if (!mca_cfg.bootlog)
		m_fl = MCP_DONTLOG;

	/*
	 * Log the machine checks left over from the previous reset.
	 */
	bitmap_fill(all_banks, MAX_NR_BANKS);
	machine_check_poll(MCP_UC | m_fl, &all_banks);

	cr4_set_bits(X86_CR4_MCE);

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
}

static void __mcheck_cpu_init_clear_banks(void)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (!b->init)
			continue;
		wrmsrl(msr_ops.ctl(i), b->ctl);
		wrmsrl(msr_ops.status(i), 0);
	}
}

/*
 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
 * Vol 3B Table 15-20). But this confuses both the code that determines
 * whether the machine check occurred in kernel or user mode, and also
 * the severity assessment code. Pretend that EIPV was set, and take the
 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
 */
static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
{
	if (bank != 0)
		return;
	if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
		return;
	if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
		          MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
			  MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
			  MCACOD)) !=
			 (MCI_STATUS_UC|MCI_STATUS_EN|
			  MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
			  MCI_STATUS_AR|MCACOD_INSTR))
		return;

	m->mcgstatus |= MCG_STATUS_EIPV;
	m->ip = regs->ip;
	m->cs = regs->cs;
}

/* Add per CPU specific workarounds here */
static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
	struct mca_config *cfg = &mca_cfg;

	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
		pr_info("unknown CPU type - not enabling MCE support\n");
		return -EOPNOTSUPP;
	}

	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15 && cfg->banks > 4) {
			/*
			 * disable GART TBL walk error reporting, which
			 * trips off incorrectly with the IOMMU & 3ware
			 * & Cerberus:
			 */
			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
		}
		if (c->x86 < 17 && cfg->bootlog < 0) {
			/*
			 * Lots of broken BIOSes around that don't clear them
			 * by default and leave crap in there. Don't log:
			 */
			cfg->bootlog = 0;
		}
		/*
		 * Various K7s with broken bank 0 around. Always disable
		 * by default.
		 */
		if (c->x86 == 6 && cfg->banks > 0)
			mce_banks[0].ctl = 0;

		/*
		 * overflow_recov is supported for F15h Models 00h-0fh
		 * even though we don't have a CPUID bit for it.
		 */
		if (c->x86 == 0x15 && c->x86_model <= 0xf)
			mce_flags.overflow_recov = 1;

		/*
		 * Turn off MC4_MISC thresholding banks on those models since
		 * they're not supported there.
		 */
		if (c->x86 == 0x15 &&
		    (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
			int i;
			u64 hwcr;
			bool need_toggle;
			u32 msrs[] = {
				0x00000413, /* MC4_MISC0 */
				0xc0000408, /* MC4_MISC1 */
			};

			rdmsrl(MSR_K7_HWCR, hwcr);

			/* McStatusWrEn has to be set */
			need_toggle = !(hwcr & BIT(18));

			if (need_toggle)
				wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));

			/* Clear CntP bit safely */
			for (i = 0; i < ARRAY_SIZE(msrs); i++)
				msr_clear_bit(msrs[i], 62);

			/* restore old settings */
			if (need_toggle)
				wrmsrl(MSR_K7_HWCR, hwcr);
		}
	}

	if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * SDM documents that on family 6 bank 0 should not be written
		 * because it aliases to another special BIOS controlled
		 * register.
		 * But it's not aliased anymore on model 0x1a+.
		 * Don't ignore bank 0 completely because there could be a
		 * valid event later, merely don't write CTL0.
		 */
		if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
			mce_banks[0].init = 0;

		/*
		 * All newer Intel systems support MCE broadcasting. Enable
		 * synchronization with a one second timeout.
		 */
		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
			cfg->monarch_timeout < 0)
			cfg->monarch_timeout = USEC_PER_SEC;

		/*
		 * There are also broken BIOSes on some Pentium M and
		 * earlier systems:
		 */
		if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
			cfg->bootlog = 0;

		if (c->x86 == 6 && c->x86_model == 45)
			quirk_no_way_out = quirk_sandybridge_ifu;
	}
	if (cfg->monarch_timeout < 0)
		cfg->monarch_timeout = 0;
	if (cfg->bootlog != 0)
		cfg->panic_timeout = 30;

	return 0;
}

static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
{
	if (c->x86 != 5)
		return 0;

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		intel_p5_mcheck_init(c);
		return 1;
	case X86_VENDOR_CENTAUR:
		winchip_mcheck_init(c);
		return 1;
	default:
		return 0;
	}

	return 0;
}
1714
static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
L
Linus Torvalds 已提交
1715 1716 1717 1718
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
1719
		mce_adjust_timer = cmci_intel_adjust_timer;
L
Linus Torvalds 已提交
1720
		break;
1721 1722

	case X86_VENDOR_AMD: {
1723 1724 1725
		mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
		mce_flags.succor	 = !!cpu_has(c, X86_FEATURE_SUCCOR);
		mce_flags.smca		 = !!cpu_has(c, X86_FEATURE_SMCA);
1726 1727 1728 1729 1730 1731 1732 1733 1734 1735

		/*
		 * Install proper ops for Scalable MCA enabled processors
		 */
		if (mce_flags.smca) {
			msr_ops.ctl	= smca_ctl_reg;
			msr_ops.status	= smca_status_reg;
			msr_ops.addr	= smca_addr_reg;
			msr_ops.misc	= smca_misc_reg;
		}
1736
		mce_amd_feature_init(c);
1737

1738
		break;
1739 1740
		}

L
Linus Torvalds 已提交
1741 1742 1743 1744 1745
	default:
		break;
	}
}

static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_clear(c);
		break;
	default:
		break;
	}
}

static void mce_start_timer(unsigned int cpu, struct timer_list *t)
{
	unsigned long iv = check_interval * HZ;

	if (mca_cfg.ignore_ce || !iv)
		return;

	per_cpu(mce_next_interval, cpu) = iv;

	t->expires = round_jiffies(jiffies + iv);
	add_timer_on(t, cpu);
}

static void __mcheck_cpu_setup_timer(void)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	unsigned int cpu = smp_processor_id();

	setup_pinned_timer(t, mce_timer_fn, cpu);
}

static void __mcheck_cpu_init_timer(void)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	unsigned int cpu = smp_processor_id();

	setup_pinned_timer(t, mce_timer_fn, cpu);
	mce_start_timer(cpu, t);
}

/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
	pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
	       smp_processor_id());
}

/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
						unexpected_machine_check;

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void mcheck_cpu_init(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return;

	if (__mcheck_cpu_ancient_init(c))
		return;

	if (!mce_available(c))
		return;

	if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
		mca_cfg.disabled = true;
		return;
	}

	if (mce_gen_pool_init()) {
		mca_cfg.disabled = true;
		pr_emerg("Couldn't allocate MCE records pool!\n");
		return;
	}

	machine_check_vector = do_machine_check;

	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(c);
	__mcheck_cpu_init_clear_banks();
	__mcheck_cpu_setup_timer();
}

/*
 * Called for each booted CPU to clear some machine checks opt-ins
 */
void mcheck_cpu_clear(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return;

	if (!mce_available(c))
		return;

	/*
	 * Possibly to clear general settings generic to x86
	 * __mcheck_cpu_clear_generic(c);
	 */
	__mcheck_cpu_clear_vendor(c);

L
}
/*
1852
 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
L
Linus Torvalds 已提交
1853 1854
 */

1855 1856 1857
static DEFINE_SPINLOCK(mce_chrdev_state_lock);
static int mce_chrdev_open_count;	/* #times opened */
static int mce_chrdev_open_exclu;	/* already open exclusive? */
T
Tim Hockin 已提交
1858

1859
static int mce_chrdev_open(struct inode *inode, struct file *file)
T
Tim Hockin 已提交
1860
{
1861
	spin_lock(&mce_chrdev_state_lock);
T
Tim Hockin 已提交
1862

1863 1864 1865
	if (mce_chrdev_open_exclu ||
	    (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_chrdev_state_lock);
I
Ingo Molnar 已提交
1866

T
Tim Hockin 已提交
1867 1868 1869 1870
		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
1871 1872
		mce_chrdev_open_exclu = 1;
	mce_chrdev_open_count++;
T
Tim Hockin 已提交
1873

1874
	spin_unlock(&mce_chrdev_state_lock);
T
Tim Hockin 已提交
1875

1876
	return nonseekable_open(inode, file);
T
Tim Hockin 已提交
1877 1878
}

1879
static int mce_chrdev_release(struct inode *inode, struct file *file)
T
Tim Hockin 已提交
1880
{
1881
	spin_lock(&mce_chrdev_state_lock);
T
Tim Hockin 已提交
1882

1883 1884
	mce_chrdev_open_count--;
	mce_chrdev_open_exclu = 0;
T
Tim Hockin 已提交
1885

1886
	spin_unlock(&mce_chrdev_state_lock);
T
Tim Hockin 已提交
1887 1888 1889 1890

	return 0;
}

static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;

	cpu_tsc[smp_processor_id()] = rdtsc();
}

static int mce_apei_read_done;

/*
 * Collect the MCE records left in persistent storage (APEI ERST) by the
 * previous boot.
 */
static int __mce_read_apei(char __user **ubuf, size_t usize)
{
	int rc;
	u64 record_id;
	struct mce m;

	if (usize < sizeof(struct mce))
		return -EINVAL;

	rc = apei_read_mce(&m, &record_id);
	/* Error or no more MCE record */
	if (rc <= 0) {
		mce_apei_read_done = 1;
		/*
		 * When ERST is disabled, mce_chrdev_read() should return
		 * "no record" instead of "no device."
		 */
		if (rc == -ENODEV)
			return 0;
		return rc;
	}
	rc = -EFAULT;
	if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
		return rc;
	/*
	 * Ideally the record would be cleared only after it has been flushed
	 * to disk or sent over the network by /sbin/mcelog, but there is no
	 * interface for that yet, so clear it here to avoid duplicates.
	 */
	rc = apei_clear_mce(record_id);
	if (rc) {
		mce_apei_read_done = 1;
		return rc;
	}
	*ubuf += sizeof(struct mce);

	return 0;
}
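
/*
 * mce_apei_read_done gates both mce_chrdev_read() and mce_chrdev_poll():
 * records saved by the previous boot are drained from ERST first, and only
 * then do readers fall through to the in-memory mcelog buffer.
 */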

static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
				size_t usize, loff_t *off)
{
	char __user *buf = ubuf;
	unsigned long *cpu_tsc;
	unsigned prev, next;
	int i, err;

	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	mutex_lock(&mce_chrdev_read_mutex);

	if (!mce_apei_read_done) {
		err = __mce_read_apei(&buf, usize);
		if (err || buf != ubuf)
			goto out;
	}

	next = mce_log_get_idx_check(mcelog.next);

	/* Only supports full reads right now */
	err = -EINVAL;
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
		goto out;

	err = 0;
	prev = 0;
	do {
		for (i = prev; i < next; i++) {
			unsigned long start = jiffies;
			struct mce *m = &mcelog.entry[i];

			while (!m->finished) {
				if (time_after_eq(jiffies, start + 2)) {
					memset(m, 0, sizeof(*m));
					goto timeout;
				}
				cpu_relax();
			}
			smp_rmb();
			err |= copy_to_user(buf, m, sizeof(*m));
			buf += sizeof(*m);
timeout:
			;
		}

		memset(mcelog.entry + prev, 0,
		       (next - prev) * sizeof(struct mce));
		prev = next;
		next = cmpxchg(&mcelog.next, prev, 0);
	} while (next != prev);
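
	/*
	 * Writers may still be filling entries they reserved before
	 * mcelog.next was reset above; wait for all of them to finish.
	 */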
	synchronize_sched();

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1);

	for (i = next; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];

		if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
			err |= copy_to_user(buf, m, sizeof(*m));
			smp_rmb();
			buf += sizeof(*m);
			memset(m, 0, sizeof(*m));
		}
	}

	if (err)
		err = -EFAULT;

out:
	mutex_unlock(&mce_chrdev_read_mutex);
	kfree(cpu_tsc);

	return err ? err : buf - ubuf;
}

static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_chrdev_wait, wait);
	if (READ_ONCE(mcelog.next))
		return POLLIN | POLLRDNORM;
	if (!mce_apei_read_done && apei_check_mce())
		return POLLIN | POLLRDNORM;
	return 0;
}

static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
				unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;

		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);

		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}

static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
			    size_t usize, loff_t *off);

void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
			     const char __user *ubuf,
			     size_t usize, loff_t *off))
{
	mce_write = fn;
}
EXPORT_SYMBOL_GPL(register_mce_write_callback);
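
/*
 * Hypothetical sketch of a user of this hook, in the spirit of the
 * mce-inject module: parse a struct mce from the user buffer and feed it
 * back into the machine check code.
 *
 *	static ssize_t inject_write(struct file *filp, const char __user *ubuf,
 *				    size_t usize, loff_t *off)
 *	{
 *		struct mce m;
 *
 *		if (usize != sizeof(m) || copy_from_user(&m, ubuf, sizeof(m)))
 *			return -EFAULT;
 *		...queue m for injection...
 *		return usize;
 *	}
 *
 *	register_mce_write_callback(inject_write);
 */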

static ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
				size_t usize, loff_t *off)
{
	if (mce_write)
		return mce_write(filp, ubuf, usize, off);
	else
		return -EINVAL;
}

static const struct file_operations mce_chrdev_ops = {
	.open			= mce_chrdev_open,
	.release		= mce_chrdev_release,
	.read			= mce_chrdev_read,
	.write			= mce_chrdev_write,
	.poll			= mce_chrdev_poll,
	.unlocked_ioctl		= mce_chrdev_ioctl,
	.llseek			= no_llseek,
};

static struct miscdevice mce_chrdev_device = {
	.minor	= MISC_MCELOG_MINOR,
	.name	= "mcelog",
	.fops	= &mce_chrdev_ops,
};
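
/*
 * Sketch of the expected user-space usage (e.g. by mcelog(8)), given the
 * "full reads only" rule enforced in mce_chrdev_read():
 *
 *	int fd = open("/dev/mcelog", O_RDONLY);
 *	int recl;
 *	char buf[MCE_LOG_LEN * sizeof(struct mce)];
 *
 *	ioctl(fd, MCE_GET_RECORD_LEN, &recl);
 *	read(fd, buf, sizeof(buf));
 */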

static void __mce_disable_bank(void *arg)
{
	int bank = *((int *)arg);
	__clear_bit(bank, this_cpu_ptr(mce_poll_banks));
	cmci_disable_bank(bank);
}

void mce_disable_bank(int bank)
{
	if (bank >= mca_cfg.banks) {
		pr_warn(FW_BUG
			"Ignoring request to disable invalid MCA bank %d.\n",
			bank);
		return;
	}
	set_bit(bank, mce_banks_ce_disabled);
	on_each_cpu(__mce_disable_bank, &bank, 1);
}

/*
 * mce=off Disables machine check
 * mce=no_cmci Disables CMCI
 * mce=no_lmce Disables LMCE
 * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
 *	monarchtimeout is how long to wait for other CPUs on machine
 *	check, or 0 to not wait
 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
 * mce=nobootlog Don't log MCEs from before booting.
 * mce=bios_cmci_threshold Don't program the CMCI threshold
 * mce=recovery Force-enables memcpy_mcsafe()
 */
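/*
 * Example: "mce=2,100" sets tolerant=2 and monarch_timeout=100, while a
 * bare "mce" keeps the defaults and just enables P5 MCE handling via
 * enable_p5_mce().
 */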
static int __init mcheck_enable(char *str)
{
	struct mca_config *cfg = &mca_cfg;

	if (*str == 0) {
		enable_p5_mce();
		return 1;
	}
	if (*str == '=')
		str++;
	if (!strcmp(str, "off"))
		cfg->disabled = true;
	else if (!strcmp(str, "no_cmci"))
		cfg->cmci_disabled = true;
	else if (!strcmp(str, "no_lmce"))
		cfg->lmce_disabled = true;
	else if (!strcmp(str, "dont_log_ce"))
		cfg->dont_log_ce = true;
	else if (!strcmp(str, "ignore_ce"))
		cfg->ignore_ce = true;
	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		cfg->bootlog = (str[0] == 'b');
	else if (!strcmp(str, "bios_cmci_threshold"))
		cfg->bios_cmci_threshold = true;
	else if (!strcmp(str, "recovery"))
		cfg->recovery = true;
	else if (isdigit(str[0])) {
		if (get_option(&str, &cfg->tolerant) == 2)
			get_option(&str, &(cfg->monarch_timeout));
	} else {
		pr_info("mce argument %s ignored. Please use /sys\n", str);
		return 0;
	}
	return 1;
}
__setup("mce", mcheck_enable);

int __init mcheck_init(void)
{
	mcheck_intel_therm_init();
	mce_register_decode_chain(&mce_srao_nb);
	mce_register_decode_chain(&mce_default_nb);
	mcheck_vendor_init_severity();

	INIT_WORK(&mce_work, mce_process_work);
	init_irq_work(&mce_irq_work, mce_irq_work_cb);

	return 0;
}

/*
 * mce_syscore: PM support
 */

/*
 * Disable machine checks on suspend and shutdown. We can't really handle
 * them later.
 */
static void mce_disable_error_reporting(void)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(msr_ops.ctl(i), 0);
	}
}

static void vendor_disable_error_reporting(void)
{
	/*
	 * Don't clear on Intel CPUs. Some of these MSRs are socket-wide.
	 * Disabling them for just a single offlined CPU is bad, since it will
	 * inhibit reporting for all shared resources on the socket like the
	 * last level cache (LLC), the integrated memory controller (iMC), etc.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		return;

	mce_disable_error_reporting();
}

static int mce_syscore_suspend(void)
{
	vendor_disable_error_reporting();
	return 0;
}

static void mce_syscore_shutdown(void)
{
	vendor_disable_error_reporting();
}

/*
 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
 * Only one CPU is active at this time, the others get re-added later using
 * CPU hotplug:
 */
static void mce_syscore_resume(void)
{
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
	__mcheck_cpu_init_clear_banks();
}

static struct syscore_ops mce_syscore_ops = {
	.suspend	= mce_syscore_suspend,
	.shutdown	= mce_syscore_shutdown,
	.resume		= mce_syscore_resume,
};

/*
 * mce_device: Sysfs support
 */

static void mce_cpu_restart(void *data)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_clear_banks();
	__mcheck_cpu_init_timer();
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	mce_timer_delete_all();
	on_each_cpu(mce_cpu_restart, NULL, 1);
}

/* Toggle features for corrected errors */
static void mce_disable_cmci(void *data)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	cmci_clear();
}

static void mce_enable_ce(void *all)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	cmci_reenable();
	cmci_recheck();
	if (all)
		__mcheck_cpu_init_timer();
}
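
/*
 * The "all" cookie distinguishes the two callers below: set_ignore_ce()
 * passes (void *)1 so that re-enabling corrected-error features also
 * restarts the per-CPU polling timer, while set_cmci_disabled() passes
 * NULL and only re-enables CMCI.
 */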

static struct bus_type mce_subsys = {
	.name		= "machinecheck",
	.dev_name	= "machinecheck",
};

DEFINE_PER_CPU(struct device *, mce_device);

static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
{
	return container_of(attr, struct mce_bank, attr);
}

static ssize_t show_bank(struct device *s, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
}

static ssize_t set_bank(struct device *s, struct device_attribute *attr,
			const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	attr_to_bank(attr)->ctl = new;
	mce_restart();

	return size;
}

static ssize_t
show_trigger(struct device *s, struct device_attribute *attr, char *buf)
{
	strcpy(buf, mce_helper);
	strcat(buf, "\n");
	return strlen(mce_helper) + 1;
}

static ssize_t set_trigger(struct device *s, struct device_attribute *attr,
				const char *buf, size_t siz)
{
	char *p;

	strncpy(mce_helper, buf, sizeof(mce_helper));
	mce_helper[sizeof(mce_helper)-1] = 0;
	p = strchr(mce_helper, '\n');

	if (p)
		*p = 0;

	return strlen(mce_helper) + !!p;
}

static ssize_t set_ignore_ce(struct device *s,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	if (mca_cfg.ignore_ce ^ !!new) {
		if (new) {
			/* disable ce features */
			mce_timer_delete_all();
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mca_cfg.ignore_ce = true;
		} else {
			/* enable ce features */
			mca_cfg.ignore_ce = false;
			on_each_cpu(mce_enable_ce, (void *)1, 1);
		}
	}
	return size;
}

static ssize_t set_cmci_disabled(struct device *s,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	if (mca_cfg.cmci_disabled ^ !!new) {
		if (new) {
			/* disable cmci */
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mca_cfg.cmci_disabled = true;
		} else {
			/* enable cmci */
			mca_cfg.cmci_disabled = false;
			on_each_cpu(mce_enable_ce, NULL, 1);
		}
	}
	return size;
}

static ssize_t store_int_with_restart(struct device *s,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	ssize_t ret = device_store_int(s, attr, buf, size);
	mce_restart();
	return ret;
}

static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);

static struct dev_ext_attribute dev_attr_check_interval = {
	__ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
	&check_interval
};

static struct dev_ext_attribute dev_attr_ignore_ce = {
	__ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
	&mca_cfg.ignore_ce
};

static struct dev_ext_attribute dev_attr_cmci_disabled = {
	__ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
	&mca_cfg.cmci_disabled
};

static struct device_attribute *mce_device_attrs[] = {
	&dev_attr_tolerant.attr,
	&dev_attr_check_interval.attr,
	&dev_attr_trigger,
	&dev_attr_monarch_timeout.attr,
	&dev_attr_dont_log_ce.attr,
	&dev_attr_ignore_ce.attr,
	&dev_attr_cmci_disabled.attr,
	NULL
};
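
/*
 * These attributes are exposed under
 * /sys/devices/system/machinecheck/machinecheckN/, e.g. (as root):
 *
 *	echo 1 > /sys/devices/system/machinecheck/machinecheck0/ignore_ce
 *	cat /sys/devices/system/machinecheck/machinecheck0/bank0
 */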

static cpumask_var_t mce_device_initialized;

static void mce_device_release(struct device *dev)
{
	kfree(dev);
}

/* Per cpu device init. All of the cpus still share the same ctrl bank: */
static int mce_device_create(unsigned int cpu)
{
	struct device *dev;
	int err;
	int i, j;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	dev = per_cpu(mce_device, cpu);
	if (dev)
		return 0;

	dev = kzalloc(sizeof *dev, GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->id  = cpu;
	dev->bus = &mce_subsys;
	dev->release = &mce_device_release;

	err = device_register(dev);
	if (err) {
		put_device(dev);
		return err;
	}

	for (i = 0; mce_device_attrs[i]; i++) {
		err = device_create_file(dev, mce_device_attrs[i]);
		if (err)
			goto error;
	}
	for (j = 0; j < mca_cfg.banks; j++) {
		err = device_create_file(dev, &mce_banks[j].attr);
		if (err)
			goto error2;
	}
	cpumask_set_cpu(cpu, mce_device_initialized);
	per_cpu(mce_device, cpu) = dev;

	return 0;
error2:
	while (--j >= 0)
		device_remove_file(dev, &mce_banks[j].attr);
error:
	while (--i >= 0)
		device_remove_file(dev, mce_device_attrs[i]);

	device_unregister(dev);

	return err;
}

static void mce_device_remove(unsigned int cpu)
{
	struct device *dev = per_cpu(mce_device, cpu);
	int i;

	if (!cpumask_test_cpu(cpu, mce_device_initialized))
		return;

	for (i = 0; mce_device_attrs[i]; i++)
		device_remove_file(dev, mce_device_attrs[i]);

	for (i = 0; i < mca_cfg.banks; i++)
		device_remove_file(dev, &mce_banks[i].attr);

	device_unregister(dev);
	cpumask_clear_cpu(cpu, mce_device_initialized);
	per_cpu(mce_device, cpu) = NULL;
}

/* Make sure there are no machine checks on offlined CPUs. */
static void mce_disable_cpu(void)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;

	if (!cpuhp_tasks_frozen)
		cmci_clear();

	vendor_disable_error_reporting();
}

static void mce_reenable_cpu(void)
{
	int i;

	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;

	if (!cpuhp_tasks_frozen)
		cmci_reenable();
	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(msr_ops.ctl(i), b->ctl);
	}
}

static int mce_cpu_dead(unsigned int cpu)
{
	mce_intel_hcpu_update(cpu);

	/* intentionally ignoring frozen here */
	if (!cpuhp_tasks_frozen)
		cmci_rediscover();
	return 0;
}

static int mce_cpu_online(unsigned int cpu)
{
	struct timer_list *t = &per_cpu(mce_timer, cpu);
	int ret;

	mce_device_create(cpu);

	ret = mce_threshold_create_device(cpu);
	if (ret) {
		mce_device_remove(cpu);
		return ret;
	}
	mce_reenable_cpu();
	mce_start_timer(cpu, t);
	return 0;
}

static int mce_cpu_pre_down(unsigned int cpu)
{
	struct timer_list *t = &per_cpu(mce_timer, cpu);

	mce_disable_cpu();
	del_timer_sync(t);
	mce_threshold_remove_device(cpu);
	mce_device_remove(cpu);
	return 0;
}
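
/*
 * Hotplug flow: mce_cpu_online() runs on the incoming CPU via the dynamic
 * "x86/mce:online" state registered below, mce_cpu_pre_down() mirrors it on
 * the way down, and mce_cpu_dead() runs on a surviving CPU afterwards so
 * CMCI banks can be rediscovered.
 */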

static __init void mce_init_banks(void)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];
		struct device_attribute *a = &b->attr;

		sysfs_attr_init(&a->attr);
		a->attr.name	= b->attrname;
		snprintf(b->attrname, ATTR_LEN, "bank%d", i);

		a->attr.mode	= 0644;
		a->show		= show_bank;
		a->store	= set_bank;
	}
}

static __init int mcheck_init_device(void)
{
	enum cpuhp_state hp_online;
	int err;

	if (!mce_available(&boot_cpu_data)) {
		err = -EIO;
		goto err_out;
	}

	if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
		err = -ENOMEM;
		goto err_out;
	}

	mce_init_banks();

	err = subsys_system_register(&mce_subsys, NULL);
	if (err)
		goto err_out_mem;

	err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL,
				mce_cpu_dead);
	if (err)
		goto err_out_mem;

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
				mce_cpu_online, mce_cpu_pre_down);
	if (err < 0)
		goto err_out_online;
	hp_online = err;

	register_syscore_ops(&mce_syscore_ops);

	/* register character device /dev/mcelog */
	err = misc_register(&mce_chrdev_device);
	if (err)
		goto err_register;

	return 0;

err_register:
	unregister_syscore_ops(&mce_syscore_ops);
	cpuhp_remove_state(hp_online);

err_out_online:
	cpuhp_remove_state(CPUHP_X86_MCE_DEAD);

err_out_mem:
	free_cpumask_var(mce_device_initialized);

err_out:
	pr_err("Unable to init device /dev/mcelog (rc: %d)\n", err);

	return err;
}
device_initcall_sync(mcheck_init_device);

/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
	mca_cfg.disabled = true;
	return 1;
}
__setup("nomce", mcheck_disable);

#ifdef CONFIG_DEBUG_FS
struct dentry *mce_get_debugfs_dir(void)
{
	static struct dentry *dmce;

	if (!dmce)
		dmce = debugfs_create_dir("mce", NULL);

	return dmce;
}

static void mce_reset(void)
{
	cpu_missing = 0;
	atomic_set(&mce_fake_panicked, 0);
	atomic_set(&mce_executing, 0);
	atomic_set(&mce_callin, 0);
	atomic_set(&global_nwo, 0);
}

static int fake_panic_get(void *data, u64 *val)
{
	*val = fake_panic;
	return 0;
}

static int fake_panic_set(void *data, u64 val)
{
	mce_reset();
	fake_panic = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
			fake_panic_set, "%llu\n");

static int __init mcheck_debugfs_init(void)
{
	struct dentry *dmce, *ffake_panic;

	dmce = mce_get_debugfs_dir();
	if (!dmce)
		return -ENOMEM;
	ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
					  &fake_panic_fops);
	if (!ffake_panic)
		return -ENOMEM;

	return 0;
}
#else
static int __init mcheck_debugfs_init(void) { return -EINVAL; }
#endif

DEFINE_STATIC_KEY_FALSE(mcsafe_key);
EXPORT_SYMBOL_GPL(mcsafe_key);

static int __init mcheck_late_init(void)
{
	if (mca_cfg.recovery)
		static_branch_inc(&mcsafe_key);

	mcheck_debugfs_init();

	/*
	 * Flush out everything that has been logged during early boot, now that
	 * everything has been initialized (workqueues, decoders, ...).
	 */
	mce_schedule_work();

	return 0;
}
late_initcall(mcheck_late_init);