/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/ras.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>
#include <linux/jump_label.h>

#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/reboot.h>
#include <asm/set_memory.h>

#include "mce-internal.h"

static DEFINE_MUTEX(mce_log_mutex);

/* sysfs synchronization */
static DEFINE_MUTEX(mce_sysfs_mutex);

#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

#define SPINUNIT		100	/* 100ns */

DEFINE_PER_CPU(unsigned, mce_exception_count);

struct mce_bank *mce_banks __read_mostly;
struct mce_vendor_flags mce_flags __read_mostly;

struct mca_config mca_cfg __read_mostly = {
	.bootlog  = -1,
	/*
	 * Tolerant levels:
	 * 0: always panic on uncorrected errors, log corrected errors
	 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
	 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
	 * 3: never panic or SIGBUS, log all errors (for testing only)
	 */
	.tolerant = 1,
	.monarch_timeout = -1
};

static DEFINE_PER_CPU(struct mce, mces_seen);
static unsigned long mce_need_notify;
static int cpu_missing;

/*
 * MCA banks polled by the period polling timer for corrected events.
 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
 */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

/*
 * MCA banks controlled through firmware first for corrected errors.
 * This is a global list of banks for which we won't enable CMCI and we
 * won't poll. Firmware controls these banks and is responsible for
 * reporting corrected errors through GHES. Uncorrected/recoverable
 * errors are still notified through a machine check.
 */
mce_banks_t mce_banks_ce_disabled;

static struct work_struct mce_work;
static struct irq_work mce_irq_work;

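/* Optional CPU-specific quirk hook, consulted per bank by mce_no_way_out(). */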
static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);

#ifndef mce_unmap_kpfn
static void mce_unmap_kpfn(unsigned long pfn);
#endif

/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);

/* Do the initial setup of a struct mce */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	/* need the internal __ version to avoid deadlocks */
	m->time = __ktime_get_real_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
	m->socketid = cpu_data(m->extcpu).phys_proc_id;
	m->apicid = cpu_data(m->extcpu).initial_apicid;
	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);

	if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
		rdmsrl(MSR_PPIN, m->ppin);

	m->microcode = boot_cpu_data.microcode;
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

void mce_log(struct mce *m)
{
	if (!mce_gen_pool_add(m))
		irq_work_queue(&mce_irq_work);
}

void mce_inject_log(struct mce *m)
{
	mutex_lock(&mce_log_mutex);
	mce_log(m);
	mutex_unlock(&mce_log_mutex);
}
EXPORT_SYMBOL_GPL(mce_inject_log);

static struct notifier_block mce_srao_nb;

/*
 * We run the default notifier if we have only the SRAO, the first and the
 * default notifier registered. I.e., the mandatory NUM_DEFAULT_NOTIFIERS
 * notifiers registered on the chain.
 */
#define NUM_DEFAULT_NOTIFIERS	3
static atomic_t num_notifiers;

void mce_register_decode_chain(struct notifier_block *nb)
{
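	/* Notifier priorities strictly between MCELOG and EDAC are not allowed. */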
	if (WARN_ON(nb->priority > MCE_PRIO_MCELOG && nb->priority < MCE_PRIO_EDAC))
		return;

	atomic_inc(&num_notifiers);

	blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_register_decode_chain);

void mce_unregister_decode_chain(struct notifier_block *nb)
{
	atomic_dec(&num_notifiers);

	blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);

static inline u32 ctl_reg(int bank)
{
	return MSR_IA32_MCx_CTL(bank);
}

static inline u32 status_reg(int bank)
{
	return MSR_IA32_MCx_STATUS(bank);
}

static inline u32 addr_reg(int bank)
{
	return MSR_IA32_MCx_ADDR(bank);
}

static inline u32 misc_reg(int bank)
{
	return MSR_IA32_MCx_MISC(bank);
}

static inline u32 smca_ctl_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_CTL(bank);
}

static inline u32 smca_status_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_STATUS(bank);
}

static inline u32 smca_addr_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_ADDR(bank);
}

static inline u32 smca_misc_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_MISC(bank);
}

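/*
 * msr_ops defaults to the legacy MCA register layout above;
 * __mcheck_cpu_init_early() switches it to the SMCA accessors on CPUs
 * that support Scalable MCA.
 */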
struct mca_msr_regs msr_ops = {
	.ctl	= ctl_reg,
	.status	= status_reg,
	.addr	= addr_reg,
	.misc	= misc_reg
};

static void __print_mce(struct mce *m)
{
	pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
		 m->extcpu,
		 (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""),
		 m->mcgstatus, m->bank, m->status);

	if (m->ip) {
		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
			m->cs, m->ip);

		if (m->cs == __KERNEL_CS)
			pr_cont("{%pS}", (void *)(unsigned long)m->ip);
		pr_cont("\n");
	}

	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
	if (m->addr)
		pr_cont("ADDR %llx ", m->addr);
	if (m->misc)
		pr_cont("MISC %llx ", m->misc);

	if (mce_flags.smca) {
		if (m->synd)
			pr_cont("SYND %llx ", m->synd);
		if (m->ipid)
			pr_cont("IPID %llx ", m->ipid);
	}

	pr_cont("\n");
	/*
	 * Note this output is parsed by external tools and old fields
	 * should not be changed.
	 */
	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
		m->microcode);
}

static void print_mce(struct mce *m)
{
	__print_mce(m);

	if (m->cpuvendor != X86_VENDOR_AMD)
		pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}

#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_panicked;

static int fake_panic;
static atomic_t mce_fake_panicked;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

	preempt_disable();
	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	if (panic_timeout == 0)
		panic_timeout = mca_cfg.panic_timeout;
	panic("Panicing machine check CPU died");
}

static void mce_panic(const char *msg, struct mce *final, char *exp)
{
	int apei_err = 0;
	struct llist_node *pending;
	struct mce_evt_llist *l;

	if (!fake_panic) {
		/*
		 * Make sure only one CPU runs in machine check panic
		 */
		if (atomic_inc_return(&mce_panicked) > 1)
			wait_for_panic();
		barrier();

		bust_spinlocks(1);
		console_verbose();
	} else {
		/* Don't log too much for fake panic */
		if (atomic_inc_return(&mce_fake_panicked) > 1)
			return;
	}
	pending = mce_gen_pool_prepare_records();
	/* First print corrected ones that are still unlogged */
	llist_for_each_entry(l, pending, llnode) {
		struct mce *m = &l->mce;
		if (!(m->status & MCI_STATUS_UC)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	/* Now print uncorrected but with the final one last */
	llist_for_each_entry(l, pending, llnode) {
		struct mce *m = &l->mce;
		if (!(m->status & MCI_STATUS_UC))
			continue;
		if (!final || mce_cmp(m, final)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	if (final) {
		print_mce(final);
		if (!apei_err)
			apei_err = apei_write_mce(final);
	}
	if (cpu_missing)
		pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
	if (exp)
		pr_emerg(HW_ERR "Machine check: %s\n", exp);
	if (!fake_panic) {
		if (panic_timeout == 0)
			panic_timeout = mca_cfg.panic_timeout;
		panic(msg);
	} else
		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
}

/* Support code for software error injection */

static int msr_to_offset(u32 msr)
{
	unsigned bank = __this_cpu_read(injectm.bank);

	if (msr == mca_cfg.rip_msr)
		return offsetof(struct mce, ip);
	if (msr == msr_ops.status(bank))
		return offsetof(struct mce, status);
	if (msr == msr_ops.addr(bank))
		return offsetof(struct mce, addr);
	if (msr == msr_ops.misc(bank))
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}

/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
	u64 v;

	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset < 0)
			return 0;
		return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
	}

	if (rdmsrl_safe(msr, &v)) {
		WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
		/*
		 * Return zero in case the access faulted. This should
		 * not happen normally but can happen if the CPU does
		 * something weird, or if the code is buggy.
		 */
		v = 0;
	}

	return v;
}

static void mce_wrmsrl(u32 msr, u64 v)
{
	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset >= 0)
			*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
		return;
	}
	wrmsrl(msr, v);
}

/*
 * Collect all global (w.r.t. this processor) status about this machine
 * check into our "mce" struct so that we can use it later to assess
 * the severity of the problem as we read per-bank specific details.
 */
static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
	mce_setup(m);

	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	if (regs) {
		/*
		 * Get the address of the instruction at the time of
		 * the machine check error.
		 */
		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
			m->ip = regs->ip;
			m->cs = regs->cs;

			/*
			 * When in VM86 mode make the cs look like ring 3
			 * always. This is a lie, but it's better than passing
			 * the additional vm86 bit around everywhere.
			 */
			if (v8086_mode(regs))
				m->cs |= 3;
		}
		/* Use accurate RIP reporting if available. */
		if (mca_cfg.rip_msr)
			m->ip = mce_rdmsrl(mca_cfg.rip_msr);
	}
}

int mce_available(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
	if (!mce_gen_pool_empty())
		schedule_work(&mce_work);
}

static void mce_irq_work_cb(struct irq_work *entry)
{
	mce_schedule_work();
}

static void mce_report_event(struct pt_regs *regs)
{
	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
		mce_notify_irq();
		/*
		 * Triggering the work queue here is just an insurance
		 * policy in case the syscall exit notify handler
		 * doesn't run soon enough or ends up running on the
		 * wrong CPU (can happen when audit sleeps)
		 */
		mce_schedule_work();
		return;
	}

	irq_work_queue(&mce_irq_work);
}

/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
static int mce_usable_address(struct mce *m)
{
	if (!(m->status & MCI_STATUS_ADDRV))
		return 0;

	/* Checks after this one are Intel-specific: */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 1;

	if (!(m->status & MCI_STATUS_MISCV))
		return 0;

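	/* Reject addresses reported at a granularity coarser than a page. */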
	if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
		return 0;

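	/* Only physical-address mode reports are usable here. */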
	if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
		return 0;

	return 1;
}

bool mce_is_memory_error(struct mce *m)
{
	if (m->cpuvendor == X86_VENDOR_AMD) {
		return amd_mce_is_memory_error(m);

	} else if (m->cpuvendor == X86_VENDOR_INTEL) {
		/*
		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
		 *
		 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
		 * indicating a memory error. Bit 8 is used for indicating a
		 * cache hierarchy error. The combination of bit 2 and bit 3
		 * is used for indicating a `generic' cache hierarchy error.
		 * But we can't just blindly check the above bits, because if
		 * bit 11 is set, then it is a bus/interconnect error - and
		 * either way the above bits just give more detail on what
		 * bus/interconnect error happened. Note that bit 12 can be
		 * ignored, as it's the "filter" bit.
		 */
		return (m->status & 0xef80) == BIT(7) ||
		       (m->status & 0xef00) == BIT(8) ||
		       (m->status & 0xeffc) == 0xc;
	}

	return false;
}
EXPORT_SYMBOL_GPL(mce_is_memory_error);

static bool mce_is_correctable(struct mce *m)
{
	if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
		return false;

	if (m->status & MCI_STATUS_UC)
		return false;

	return true;
}

static bool cec_add_mce(struct mce *m)
{
	if (!m)
		return false;

	/* We eat only correctable DRAM errors with usable addresses. */
	if (mce_is_memory_error(m) &&
	    mce_is_correctable(m)  &&
	    mce_usable_address(m))
		if (!cec_add_elem(m->addr >> PAGE_SHIFT))
			return true;

	return false;
}

static int mce_first_notifier(struct notifier_block *nb, unsigned long val,
			      void *data)
{
	struct mce *m = (struct mce *)data;

	if (!m)
		return NOTIFY_DONE;

	if (cec_add_mce(m))
		return NOTIFY_STOP;

	/* Emit the trace record: */
	trace_mce_record(m);

	set_bit(0, &mce_need_notify);

	mce_notify_irq();

	return NOTIFY_DONE;
}

static struct notifier_block first_nb = {
	.notifier_call	= mce_first_notifier,
	.priority	= MCE_PRIO_FIRST,
};

static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct mce *mce = (struct mce *)data;
	unsigned long pfn;

	if (!mce)
		return NOTIFY_DONE;

	if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
		pfn = mce->addr >> PAGE_SHIFT;
		if (!memory_failure(pfn, 0))
			mce_unmap_kpfn(pfn);
	}

	return NOTIFY_OK;
}
static struct notifier_block mce_srao_nb = {
	.notifier_call	= srao_decode_notifier,
	.priority	= MCE_PRIO_SRAO,
};

static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct mce *m = (struct mce *)data;

	if (!m)
		return NOTIFY_DONE;

	if (atomic_read(&num_notifiers) > NUM_DEFAULT_NOTIFIERS)
		return NOTIFY_DONE;

	__print_mce(m);

	return NOTIFY_DONE;
}

static struct notifier_block mce_default_nb = {
	.notifier_call	= mce_default_notifier,
	/* lowest prio, we want it to run last. */
	.priority	= MCE_PRIO_LOWEST,
};

/*
 * Read ADDR and MISC registers.
 */
static void mce_read_aux(struct mce *m, int i)
{
	if (m->status & MCI_STATUS_MISCV)
		m->misc = mce_rdmsrl(msr_ops.misc(i));

	if (m->status & MCI_STATUS_ADDRV) {
		m->addr = mce_rdmsrl(msr_ops.addr(i));

		/*
		 * Mask the reported address by the reported granularity.
		 */
		if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
			u8 shift = MCI_MISC_ADDR_LSB(m->misc);
			m->addr >>= shift;
			m->addr <<= shift;
		}

		/*
		 * Extract [55:<lsb>] where lsb is the least significant
		 * *valid* bit of the address bits.
		 */
		if (mce_flags.smca) {
			u8 lsb = (m->addr >> 56) & 0x3f;

			m->addr &= GENMASK_ULL(55, lsb);
		}
	}

	if (mce_flags.smca) {
		m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i));

		if (m->status & MCI_STATUS_SYNDV)
			m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i));
	}
}

DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: the spec recommends panicking for fatal unsignaled
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between the exception handler
 * and the poll handler -- so we skip this for now.
 * These cases should not happen anyway, or only when the CPU
 * is already totally confused. In this case it's likely it will
 * not fully execute the machine check handler either.
 */
bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	bool error_seen = false;
	struct mce m;
	int i;

	this_cpu_inc(mce_poll_count);

	mce_gather_info(&m, NULL);

	if (flags & MCP_TIMESTAMP)
		m.tsc = rdtsc();

	for (i = 0; i < mca_cfg.banks; i++) {
		if (!mce_banks[i].ctl || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		barrier();
		m.status = mce_rdmsrl(msr_ops.status(i));
		if (!(m.status & MCI_STATUS_VAL))
			continue;

		/*
		 * Uncorrected or signalled events are handled by the exception
		 * handler when it is enabled, so don't process those here.
		 *
		 * TBD do the same check for MCI_STATUS_EN here?
		 */
		if (!(flags & MCP_UC) &&
		    (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
			continue;

		error_seen = true;

		mce_read_aux(&m, i);

		m.severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);

		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */
		if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
			mce_log(&m);
		else if (mce_usable_address(&m)) {
			/*
			 * Although we skipped logging this, we still want
			 * to take action. Add to the pool so the registered
			 * notifiers will see it.
			 */
			if (!mce_gen_pool_add(&m))
				mce_schedule_work();
		}

		/*
		 * Clear state for this bank.
		 */
		mce_wrmsrl(msr_ops.status(i), 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */

	sync_core();

	return error_seen;
}
EXPORT_SYMBOL_GPL(machine_check_poll);

/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
			  struct pt_regs *regs)
{
	char *tmp;
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		m->status = mce_rdmsrl(msr_ops.status(i));
		if (!(m->status & MCI_STATUS_VAL))
			continue;

		__set_bit(i, validp);
		if (quirk_no_way_out)
			quirk_no_way_out(i, m, regs);

		if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
			mce_read_aux(m, i);
			*msg = tmp;
			return 1;
		}
	}
	return 0;
}

/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until executing is equal its number.
 */
static atomic_t mce_executing;

/*
 * Defines order of CPUs on entry. First CPU becomes Monarch.
 */
static atomic_t mce_callin;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t, const char *msg)
{
	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that system_state
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_panicked))
		wait_for_panic();
	if (!mca_cfg.monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		if (mca_cfg.tolerant <= 1)
			mce_panic(msg, NULL, NULL);
		cpu_missing = 1;
		return 1;
	}
	*t -= SPINUNIT;
out:
	touch_nmi_watchdog();
	return 0;
}

/*
 * The Monarch's reign.  The Monarch is the CPU who entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. If any
 * error is fatal, it panics. Only then does it let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable case
 * and also make sure all CPUs' errors are always examined.
 *
 * Also this detects the case of a machine check event coming from outer
 * space (not detected by any CPU). In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a semi-stable
 * state and won't corrupt anything by itself. It's ok to let the others
 * continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;
	char *nmsg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		int severity = mce_severity(&per_cpu(mces_seen, cpu),
					    mca_cfg.tolerant,
					    &nmsg, true);
		if (severity > global_worst) {
			msg = nmsg;
			global_worst = severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal machine check", m, msg);

	/*
	 * For UC somewhere we let the CPU who detects it handle it.
	 * Also must let continue the others, otherwise the handling
	 * CPU could deadlock on a lock.
	 */

	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}

static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires panic. Then it executes them
 * in the entry order.
 * TBD double check parallel CPU hotunplug
 */
static int mce_start(int *no_way_out)
{
	int order;
	int cpus = num_online_cpus();
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		return -1;

	atomic_add(*no_way_out, &global_nwo);
	/*
	 * Rely on the implied barrier below, such that global_nwo
	 * is updated before mce_callin.
	 */
	order = atomic_inc_return(&mce_callin);

	/*
	 * Wait for everyone.
	 */
	while (atomic_read(&mce_callin) != cpus) {
		if (mce_timed_out(&timeout,
				  "Timeout: Not all CPUs entered broadcast exception handler")) {
			atomic_set(&global_nwo, 0);
			return -1;
		}
		ndelay(SPINUNIT);
	}

	/*
	 * mce_callin should be read before global_nwo
	 */
	smp_rmb();

	if (order == 1) {
		/*
		 * Monarch: Starts executing now, the others wait.
		 */
		atomic_set(&mce_executing, 1);
	} else {
		/*
		 * Subject: Now start the scanning loop one by one in
		 * the original callin order.
		 * This way when there are any shared banks it will be
		 * only seen by one CPU before cleared, avoiding duplicates.
		 */
		while (atomic_read(&mce_executing) < order) {
			if (mce_timed_out(&timeout,
					  "Timeout: Subject CPUs unable to finish machine check processing")) {
				atomic_set(&global_nwo, 0);
				return -1;
			}
			ndelay(SPINUNIT);
		}
	}

	/*
	 * Cache the global no_way_out state.
	 */
	*no_way_out = atomic_read(&global_nwo);

	return order;
}

/*
 * Synchronize between CPUs after main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static int mce_end(int order)
{
	int ret = -1;
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		goto reset;
	if (order < 0)
		goto reset;

	/*
	 * Allow others to run.
	 */
	atomic_inc(&mce_executing);

	if (order == 1) {
		/* CHECKME: Can this race with a parallel hotplug? */
		int cpus = num_online_cpus();

		/*
		 * Monarch: Wait for everyone to go through their scanning
		 * loops.
		 */
		while (atomic_read(&mce_executing) <= cpus) {
			if (mce_timed_out(&timeout,
					  "Timeout: Monarch CPU unable to finish machine check processing"))
				goto reset;
			ndelay(SPINUNIT);
		}

		mce_reign();
		barrier();
		ret = 0;
	} else {
		/*
		 * Subject: Wait for Monarch to finish.
		 */
		while (atomic_read(&mce_executing) != 0) {
			if (mce_timed_out(&timeout,
					  "Timeout: Monarch CPU did not finish machine check processing"))
				goto reset;
			ndelay(SPINUNIT);
		}

		/*
		 * Don't reset anything. That's done by the Monarch.
		 */
		return 0;
	}

	/*
	 * Reset all global state.
	 */
reset:
	atomic_set(&global_nwo, 0);
	atomic_set(&mce_callin, 0);
	barrier();

	/*
	 * Let others run again.
	 */
	atomic_set(&mce_executing, 0);
	return ret;
}

static void mce_clear_state(unsigned long *toclear)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		if (test_bit(i, toclear))
			mce_wrmsrl(msr_ops.status(i), 0);
	}
}

static int do_memory_failure(struct mce *m)
{
	int flags = MF_ACTION_REQUIRED;
	int ret;

	pr_err("Uncorrected hardware memory error in user-access at %llx", m->addr);
	if (!(m->mcgstatus & MCG_STATUS_RIPV))
		flags |= MF_MUST_KILL;
	ret = memory_failure(m->addr >> PAGE_SHIFT, flags);
	if (ret)
		pr_err("Memory error not recovered");
	else
		mce_unmap_kpfn(m->addr >> PAGE_SHIFT);
	return ret;
}

#ifndef mce_unmap_kpfn
static void mce_unmap_kpfn(unsigned long pfn)
{
	unsigned long decoy_addr;

	/*
	 * Unmap this page from the kernel 1:1 mappings to make sure
	 * we don't log more errors because of speculative access to
	 * the page.
	 * We would like to just call:
	 *	set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1);
	 * but doing that would radically increase the odds of a
	 * speculative access to the poison page because we'd have
	 * the virtual address of the kernel 1:1 mapping sitting
	 * around in registers.
	 * Instead we get tricky.  We create a non-canonical address
	 * that looks just like the one we want, but has bit 63 flipped.
	 * This relies on set_memory_np() not checking whether we passed
	 * a legal address.
	 */

	decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));

	if (set_memory_np(decoy_addr, 1))
		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
}
#endif


/*
 * Cases where we avoid rendezvous handler timeout:
 * 1) If this CPU is offline.
 *
 * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
 *  skip those CPUs which remain looping in the 1st kernel - see
 *  crash_nmi_callback().
 *
 * Note: there still is a small window between kexec-ing and the new,
 * kdump kernel establishing a new #MC handler where a broadcasted MCE
 * might not get handled properly.
 */
static bool __mc_check_crashing_cpu(int cpu)
{
	if (cpu_is_offline(cpu) ||
	    (crashing_cpu != -1 && crashing_cpu != cpu)) {
		u64 mcgstatus;

		mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
		if (mcgstatus & MCG_STATUS_RIPV) {
			mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
			return true;
		}
	}
	return false;
}

/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context not subject to normal locking rules. This
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 *
 * On Intel systems this is entered on all CPUs in parallel through
 * MCE broadcast. However some CPUs might be broken beyond repair,
 * so always be careful when synchronizing with others.
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
	struct mca_config *cfg = &mca_cfg;
	int cpu = smp_processor_id();
	char *msg = "Unknown";
	struct mce m, *final;
	int worst = 0;
	int severity;
	int i;

	/*
	 * Establish sequential order between the CPUs entering the machine
	 * check handler.
	 */
	int order = -1;

	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE.  If mca_cfg.tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;

	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;

	/*
	 * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
	 * on Intel.
	 */
	int lmce = 1;

	if (__mc_check_crashing_cpu(cpu))
		return;

	ist_enter(regs);

	this_cpu_inc(mce_exception_count);

	mce_gather_info(&m, regs);
	m.tsc = rdtsc();

	final = this_cpu_ptr(&mces_seen);
	*final = m;

	memset(valid_banks, 0, sizeof(valid_banks));
	no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);

	barrier();

	/*
	 * When there is no restart IP we might need to kill or panic.
	 * Assume the worst for now, but if we find the
	 * severity is MCE_AR_SEVERITY we have other options.
	 */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		kill_it = 1;

	/*
	 * Check if this MCE is signaled to only this logical processor,
	 * on Intel only.
	 */
	if (m.cpuvendor == X86_VENDOR_INTEL)
		lmce = m.mcgstatus & MCG_STATUS_LMCES;

	/*
	 * Local machine check may already know that we have to panic.
	 * Broadcast machine check begins rendezvous in mce_start()
	 * Go through all banks in exclusion of the other CPUs. This way we
	 * don't report duplicated events on shared banks because the first one
	 * to see it will clear it.
	 */
	if (lmce) {
		if (no_way_out)
			mce_panic("Fatal local machine check", &m, msg);
	} else {
		order = mce_start(&no_way_out);
	}

	for (i = 0; i < cfg->banks; i++) {
		__clear_bit(i, toclear);
		if (!test_bit(i, valid_banks))
			continue;
		if (!mce_banks[i].ctl)
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		m.status = mce_rdmsrl(msr_ops.status(i));
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		/*
		 * Errors that are neither uncorrected nor signaled are handled
		 * by machine_check_poll(). Leave them alone, unless this panics.
		 */
		if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
			!no_way_out)
			continue;

		/*
		 * Set taint even when machine check was not enabled.
		 */
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

		severity = mce_severity(&m, cfg->tolerant, NULL, true);

		/*
		 * When the machine check was for the corrected/deferred handler,
		 * don't touch it unless we're panicking.
		 */
		if ((severity == MCE_KEEP_SEVERITY ||
		     severity == MCE_UCNA_SEVERITY) && !no_way_out)
			continue;
		__set_bit(i, toclear);
		if (severity == MCE_NO_SEVERITY) {
			/*
			 * Machine check event was not enabled. Clear, but
			 * ignore.
			 */
			continue;
		}

		mce_read_aux(&m, i);

		/* assuming valid severity level != 0 */
		m.severity = severity;

		mce_log(&m);

		if (severity > worst) {
			*final = m;
			worst = severity;
		}
	}

	/* mce_clear_state will clear *final, save locally for use later */
	m = *final;

	if (!no_way_out)
		mce_clear_state(toclear);

	/*
	 * Do most of the synchronization with other CPUs.
	 * When there's any problem use only local no_way_out state.
	 */
	if (!lmce) {
		if (mce_end(order) < 0)
			no_way_out = worst >= MCE_PANIC_SEVERITY;
	} else {
		/*
		 * If there was a fatal machine check we should have
		 * already called mce_panic earlier in this function.
		 * Since we re-read the banks, we might have found
		 * something new. Check again to see if we found a
		 * fatal error. We call "mce_severity()" again to
		 * make sure we have the right "msg".
		 */
		if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
			mce_severity(&m, cfg->tolerant, &msg, true);
			mce_panic("Local fatal machine check!", &m, msg);
		}
	}

	/*
	 * If tolerant is at an insane level we drop requests to kill
	 * processes and continue even when there is no way out.
	 */
	if (cfg->tolerant == 3)
		kill_it = 0;
	else if (no_way_out)
		mce_panic("Fatal machine check on current CPU", &m, msg);

	if (worst > 0)
		mce_report_event(regs);
	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);

	sync_core();

	if (worst != MCE_AR_SEVERITY && !kill_it)
		goto out_ist;

	/* Fault was in user mode and we need to take some action */
	if ((m.cs & 3) == 3) {
		ist_begin_non_atomic(regs);
		local_irq_enable();

		if (kill_it || do_memory_failure(&m))
			force_sig(SIGBUS, current);
		local_irq_disable();
		ist_end_non_atomic();
	} else {
		if (!fixup_exception(regs, X86_TRAP_MC))
			mce_panic("Failed kernel mode recovery", &m, NULL);
	}

out_ist:
	ist_exit(regs);
}
EXPORT_SYMBOL_GPL(do_machine_check);

#ifndef CONFIG_MEMORY_FAILURE
int memory_failure(unsigned long pfn, int flags)
{
	/* mce_severity() should not hand us an ACTION_REQUIRED error */
	BUG_ON(flags & MF_ACTION_REQUIRED);
	pr_err("Uncorrected memory error in page 0x%lx ignored\n"
	       "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
	       pfn);

	return 0;
}
#endif

/*
 * Periodic polling timer for "silent" machine check errors.  If the
 * poller finds an MCE, poll 2x faster.  When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static unsigned long check_interval = INITIAL_CHECK_INTERVAL;

static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static unsigned long mce_adjust_timer_default(unsigned long interval)
{
	return interval;
}

static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;

static void __start_timer(struct timer_list *t, unsigned long interval)
{
	unsigned long when = jiffies + interval;
	unsigned long flags;

	local_irq_save(flags);

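	/* Arm the timer, or pull an already-pending expiry earlier; never later. */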
	if (!timer_pending(t) || time_before(when, t->expires))
		mod_timer(t, round_jiffies(when));

	local_irq_restore(flags);
}

static void mce_timer_fn(struct timer_list *t)
{
	struct timer_list *cpu_t = this_cpu_ptr(&mce_timer);
	unsigned long iv;

	WARN_ON(cpu_t != t);

	iv = __this_cpu_read(mce_next_interval);

	if (mce_available(this_cpu_ptr(&cpu_info))) {
		machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));

		if (mce_intel_cmci_poll()) {
			iv = mce_adjust_timer(iv);
			goto done;
		}
	}

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the polling
	 * interval, otherwise increase the polling interval.
	 */
	if (mce_notify_irq())
		iv = max(iv / 2, (unsigned long) HZ/100);
	else
		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));

done:
	__this_cpu_write(mce_next_interval, iv);
	__start_timer(t, iv);
}

/*
 * Ensure that the timer is firing in @interval from now.
 */
void mce_timer_kick(unsigned long interval)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	unsigned long iv = __this_cpu_read(mce_next_interval);

	__start_timer(t, interval);

	if (interval < iv)
		__this_cpu_write(mce_next_interval, interval);
}

/* Must not be called in IRQ context where del_timer_sync() can deadlock */
static void mce_timer_delete_all(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		del_timer_sync(&per_cpu(mce_timer, cpu));
}

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_irq(void)
{
	/* Not more than two messages every minute */
	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

	if (test_and_clear_bit(0, &mce_need_notify)) {
		mce_work_trigger();

		if (__ratelimit(&ratelimit))
			pr_info(HW_ERR "Machine check events logged\n");

		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mce_notify_irq);

static int __mcheck_cpu_mce_banks_init(void)
{
	int i;
	u8 num_banks = mca_cfg.banks;

	mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
	if (!mce_banks)
		return -ENOMEM;

	for (i = 0; i < num_banks; i++) {
		struct mce_bank *b = &mce_banks[i];

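		/* Enable logging of all errors in this bank by default. */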
		b->ctl = -1ULL;
		b->init = 1;
	}
	return 0;
}

/*
 * Initialize Machine Checks for a CPU.
 */
static int __mcheck_cpu_cap_init(void)
{
	unsigned b;
	u64 cap;

	rdmsrl(MSR_IA32_MCG_CAP, cap);

	b = cap & MCG_BANKCNT_MASK;
	if (!mca_cfg.banks)
		pr_info("CPU supports %d MCE banks\n", b);

	if (b > MAX_NR_BANKS) {
		pr_warn("Using only %u machine check banks out of %u\n",
			MAX_NR_BANKS, b);
		b = MAX_NR_BANKS;
	}

	/* Don't support asymmetric configurations today */
	WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
	mca_cfg.banks = b;

	if (!mce_banks) {
		int err = __mcheck_cpu_mce_banks_init();

		if (err)
			return err;
	}

	/* Use accurate RIP reporting if available. */
	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
		mca_cfg.rip_msr = MSR_IA32_MCG_EIP;

	if (cap & MCG_SER_P)
		mca_cfg.ser = 1;

	return 0;
}

static void __mcheck_cpu_init_generic(void)
{
	enum mcp_flags m_fl = 0;
	mce_banks_t all_banks;
	u64 cap;

	if (!mca_cfg.bootlog)
		m_fl = MCP_DONTLOG;

	/*
	 * Log the machine checks left over from the previous reset.
	 */
	bitmap_fill(all_banks, MAX_NR_BANKS);
	machine_check_poll(MCP_UC | m_fl, &all_banks);

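	/* Enable machine-check exceptions globally via the CR4.MCE bit. */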
	cr4_set_bits(X86_CR4_MCE);

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
}

static void __mcheck_cpu_init_clear_banks(void)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (!b->init)
			continue;
		wrmsrl(msr_ops.ctl(i), b->ctl);
		wrmsrl(msr_ops.status(i), 0);
	}
}

/*
 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
 * Vol 3B Table 15-20). But this confuses both the code that determines
 * whether the machine check occurred in kernel or user mode, and also
 * the severity assessment code. Pretend that EIPV was set, and take the
 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
 */
static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
{
	if (bank != 0)
		return;
	if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
		return;
	if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
		          MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
			  MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
			  MCACOD)) !=
			 (MCI_STATUS_UC|MCI_STATUS_EN|
			  MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
			  MCI_STATUS_AR|MCACOD_INSTR))
		return;

	m->mcgstatus |= MCG_STATUS_EIPV;
	m->ip = regs->ip;
	m->cs = regs->cs;
}

/* Add per CPU specific workarounds here */
static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
	struct mca_config *cfg = &mca_cfg;

	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
		pr_info("unknown CPU type - not enabling MCE support\n");
		return -EOPNOTSUPP;
	}

L
1608
	if (c->x86_vendor == X86_VENDOR_AMD) {
1609
		if (c->x86 == 15 && cfg->banks > 4) {
I
			 * disable GART TBL walk error reporting, which
			 * trips off incorrectly with the IOMMU & 3ware
			 * & Cerberus:
			 */
1615
			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
I
1617
		if (c->x86 < 0x11 && cfg->bootlog < 0) {
I
			 * Lots of broken BIOS around that don't clear them
			 * by default and leave crap in there. Don't log:
			 */
1622
			cfg->bootlog = 0;
I
1624 1625 1626 1627
		/*
		 * Various K7s with broken bank 0 around. Always disable
		 * by default.
		 */
1628
		if (c->x86 == 6 && cfg->banks > 0)
1629
			mce_banks[0].ctl = 0;
1630

1631 1632 1633 1634 1635 1636 1637
		/*
		 * overflow_recov is supported for F15h Models 00h-0fh
		 * even though we don't have a CPUID bit for it.
		 */
		if (c->x86 == 0x15 && c->x86_model <= 0xf)
			mce_flags.overflow_recov = 1;

1638 1639 1640 1641 1642 1643 1644 1645 1646 1647
		/*
		 * Turn off MC4_MISC thresholding banks on those models since
		 * they're not supported there.
		 */
		if (c->x86 == 0x15 &&
		    (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
			int i;
			u64 hwcr;
			bool need_toggle;
			u32 msrs[] = {
1648 1649
				0x00000413, /* MC4_MISC0 */
				0xc0000408, /* MC4_MISC1 */
1650
			};
1651

1652
			rdmsrl(MSR_K7_HWCR, hwcr);
1653

1654 1655
			/* McStatusWrEn has to be set */
			need_toggle = !(hwcr & BIT(18));
1656

1657 1658
			if (need_toggle)
				wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));
1659

1660 1661 1662
			/* Clear CntP bit safely */
			for (i = 0; i < ARRAY_SIZE(msrs); i++)
				msr_clear_bit(msrs[i], 62);
1663

1664 1665 1666 1667
			/* restore old settings */
			if (need_toggle)
				wrmsrl(MSR_K7_HWCR, hwcr);
		}
L
1669

1670 1671 1672 1673 1674 1675 1676 1677 1678 1679
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * SDM documents that on family 6 bank 0 should not be written
		 * because it aliases to another special BIOS controlled
		 * register.
		 * But it's not aliased anymore on model 0x1a+
		 * Don't ignore bank 0 completely because there could be a
		 * valid event later, merely don't write CTL0.
		 */

1680
		if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
1681
			mce_banks[0].init = 0;
1682 1683 1684 1685 1686 1687

		/*
		 * All newer Intel systems support MCE broadcasting. Enable
		 * synchronization with a one second timeout.
		 */
		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
1688 1689
			cfg->monarch_timeout < 0)
			cfg->monarch_timeout = USEC_PER_SEC;
1690

1691 1692 1693 1694
		/*
		 * There are also broken BIOSes on some Pentium M and
		 * earlier systems:
		 */
1695 1696
		if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
			cfg->bootlog = 0;
1697 1698 1699

		if (c->x86 == 6 && c->x86_model == 45)
			quirk_no_way_out = quirk_sandybridge_ifu;
1700
	}
1701 1702 1703
	if (cfg->monarch_timeout < 0)
		cfg->monarch_timeout = 0;
	if (cfg->bootlog != 0)
1704
		cfg->panic_timeout = 30;
1705 1706

	return 0;
1707
}
L
1709
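/*
 * Family 5 CPUs (original Pentium and WinChip) predate the modern MCA
 * architecture and are handed off to their own minimal handlers here.
 */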
static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1710 1711
{
	if (c->x86 != 5)
1712 1713
		return 0;

1714 1715
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
1716
		intel_p5_mcheck_init(c);
1717
		return 1;
1718 1719 1720
		break;
	case X86_VENDOR_CENTAUR:
		winchip_mcheck_init(c);
1721
		return 1;
1722
		break;
1723 1724
	default:
		return 0;
1725
	}
1726 1727

	return 0;
1728 1729
}

1730 1731 1732 1733
/*
 * Init basic CPU features needed for early decoding of MCEs.
 */
static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
{
	if (c->x86_vendor == X86_VENDOR_AMD) {
		mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
		mce_flags.succor	 = !!cpu_has(c, X86_FEATURE_SUCCOR);
		mce_flags.smca		 = !!cpu_has(c, X86_FEATURE_SMCA);

		if (mce_flags.smca) {
			msr_ops.ctl	= smca_ctl_reg;
			msr_ops.status	= smca_status_reg;
			msr_ops.addr	= smca_addr_reg;
			msr_ops.misc	= smca_misc_reg;
		}
	}
}

static void mce_centaur_feature_init(struct cpuinfo_x86 *c)
{
	struct mca_config *cfg = &mca_cfg;

	 /*
	  * All newer Centaur CPUs support MCE broadcasting. Enable
	  * synchronization with a one second timeout.
	  */
	if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) ||
	     c->x86 > 6) {
		if (cfg->monarch_timeout < 0)
			cfg->monarch_timeout = USEC_PER_SEC;
	}
}

static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		mce_adjust_timer = cmci_intel_adjust_timer;
		break;

	case X86_VENDOR_AMD: {
		mce_amd_feature_init(c);
		break;
		}
	case X86_VENDOR_CENTAUR:
		mce_centaur_feature_init(c);
		break;

	default:
		break;
	}
}

static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_clear(c);
		break;
	default:
		break;
	}
}

static void mce_start_timer(struct timer_list *t)
{
	unsigned long iv = check_interval * HZ;

	if (mca_cfg.ignore_ce || !iv)
		return;

	this_cpu_write(mce_next_interval, iv);
	__start_timer(t, iv);
}

static void __mcheck_cpu_setup_timer(void)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);

	timer_setup(t, mce_timer_fn, TIMER_PINNED);
}

static void __mcheck_cpu_init_timer(void)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);

	timer_setup(t, mce_timer_fn, TIMER_PINNED);
	mce_start_timer(t);
}

/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
	pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
	       smp_processor_id());
}

/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
						unexpected_machine_check;

dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
{
	machine_check_vector(regs, error_code);
}

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void mcheck_cpu_init(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return;

	if (__mcheck_cpu_ancient_init(c))
		return;

	if (!mce_available(c))
		return;

	if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
		mca_cfg.disabled = 1;
		return;
	}

	if (mce_gen_pool_init()) {
		mca_cfg.disabled = 1;
		pr_emerg("Couldn't allocate MCE records pool!\n");
		return;
	}

	machine_check_vector = do_machine_check;

	__mcheck_cpu_init_early(c);
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(c);
	__mcheck_cpu_init_clear_banks();
	__mcheck_cpu_setup_timer();
}

/*
 * Called for each booted CPU to clear some machine checks opt-ins
 */
void mcheck_cpu_clear(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return;

	if (!mce_available(c))
		return;

	/*
	 * Possibly to clear general settings generic to x86
	 * __mcheck_cpu_clear_generic(c);
	 */
	__mcheck_cpu_clear_vendor(c);

}

static void __mce_disable_bank(void *arg)
{
	int bank = *((int *)arg);
	__clear_bit(bank, this_cpu_ptr(mce_poll_banks));
	cmci_disable_bank(bank);
}

void mce_disable_bank(int bank)
{
	if (bank >= mca_cfg.banks) {
		pr_warn(FW_BUG
			"Ignoring request to disable invalid MCA bank %d.\n",
			bank);
		return;
	}
	set_bit(bank, mce_banks_ce_disabled);
	on_each_cpu(__mce_disable_bank, &bank, 1);
}

/*
 * mce=off Disables machine check
 * mce=no_cmci Disables CMCI
 * mce=no_lmce Disables LMCE
 * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
 *	monarchtimeout is how long to wait for other CPUs on machine
 *	check, or 0 to not wait
 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD Fam10h
 *	and older.
 * mce=nobootlog Don't log MCEs from before booting.
 * mce=bios_cmci_threshold Don't program the CMCI threshold
 * mce=recovery Force-enable memcpy_mcsafe()
 */
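/*
 * Example (illustrative only): booting with "mce=2,500" sets tolerant=2
 * and monarch_timeout=500, while "mce=no_cmci" keeps machine checks
 * enabled but turns off CMCI.
 */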
static int __init mcheck_enable(char *str)
{
	struct mca_config *cfg = &mca_cfg;

	if (*str == 0) {
		enable_p5_mce();
		return 1;
	}
	if (*str == '=')
		str++;
	if (!strcmp(str, "off"))
		cfg->disabled = 1;
	else if (!strcmp(str, "no_cmci"))
		cfg->cmci_disabled = true;
	else if (!strcmp(str, "no_lmce"))
		cfg->lmce_disabled = 1;
	else if (!strcmp(str, "dont_log_ce"))
		cfg->dont_log_ce = true;
	else if (!strcmp(str, "ignore_ce"))
		cfg->ignore_ce = true;
	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		cfg->bootlog = (str[0] == 'b');
	else if (!strcmp(str, "bios_cmci_threshold"))
		cfg->bios_cmci_threshold = 1;
	else if (!strcmp(str, "recovery"))
		cfg->recovery = 1;
	else if (isdigit(str[0])) {
		if (get_option(&str, &cfg->tolerant) == 2)
			get_option(&str, &(cfg->monarch_timeout));
	} else {
		pr_info("mce argument %s ignored. Please use /sys\n", str);
		return 0;
	}
	return 1;
}
__setup("mce", mcheck_enable);

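/*
 * Boot-time initialization: register the notifier chains that decode and
 * act on MCE records, and set up the deferred-processing machinery.
 */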
int __init mcheck_init(void)
{
	mcheck_intel_therm_init();
	mce_register_decode_chain(&first_nb);
	mce_register_decode_chain(&mce_srao_nb);
	mce_register_decode_chain(&mce_default_nb);
	mcheck_vendor_init_severity();

	INIT_WORK(&mce_work, mce_gen_pool_process);
	init_irq_work(&mce_irq_work, mce_irq_work_cb);

	return 0;
}

/*
 * mce_syscore: PM support
 */

/*
 * Disable machine checks on suspend and shutdown. We can't really handle
 * them later.
 */
static void mce_disable_error_reporting(void)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(msr_ops.ctl(i), 0);
	}
}

static void vendor_disable_error_reporting(void)
{
	/*
	 * Don't clear on Intel or AMD CPUs. Some of these MSRs are socket-wide.
	 * Disabling them for just a single offlined CPU is bad, since it will
	 * inhibit reporting for all shared resources on the socket like the
	 * last level cache (LLC), the integrated memory controller (iMC), etc.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return;

	mce_disable_error_reporting();
}

static int mce_syscore_suspend(void)
{
	vendor_disable_error_reporting();
	return 0;
}

static void mce_syscore_shutdown(void)
{
	vendor_disable_error_reporting();
}

/*
 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
 * Only one CPU is active at this time, the others get re-added later using
 * CPU hotplug:
 */
static void mce_syscore_resume(void)
{
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
	__mcheck_cpu_init_clear_banks();
}

static struct syscore_ops mce_syscore_ops = {
	.suspend	= mce_syscore_suspend,
	.shutdown	= mce_syscore_shutdown,
	.resume		= mce_syscore_resume,
};

/*
 * mce_device: Sysfs support
 */

static void mce_cpu_restart(void *data)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_clear_banks();
	__mcheck_cpu_init_timer();
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	mce_timer_delete_all();
	on_each_cpu(mce_cpu_restart, NULL, 1);
}

/* Toggle features for corrected errors */
static void mce_disable_cmci(void *data)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	cmci_clear();
}

static void mce_enable_ce(void *all)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	cmci_reenable();
	cmci_recheck();
	if (all)
		__mcheck_cpu_init_timer();
}

static struct bus_type mce_subsys = {
	.name		= "machinecheck",
	.dev_name	= "machinecheck",
};

DEFINE_PER_CPU(struct device *, mce_device);

static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
{
	return container_of(attr, struct mce_bank, attr);
}

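/*
 * Per-bank "bankN" sysfs attributes: reading returns the bank's control
 * (MCi_CTL) value, writing updates it and restarts machine check setup.
 */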
static ssize_t show_bank(struct device *s, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
}

static ssize_t set_bank(struct device *s, struct device_attribute *attr,
			const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	attr_to_bank(attr)->ctl = new;
	mce_restart();

	return size;
}

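/* sysfs: toggle ignore_ce at runtime; tears down or re-enables CE polling and CMCI. */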
static ssize_t set_ignore_ce(struct device *s,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	mutex_lock(&mce_sysfs_mutex);
	if (mca_cfg.ignore_ce ^ !!new) {
		if (new) {
			/* disable ce features */
			mce_timer_delete_all();
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mca_cfg.ignore_ce = true;
		} else {
			/* enable ce features */
			mca_cfg.ignore_ce = false;
			on_each_cpu(mce_enable_ce, (void *)1, 1);
		}
	}
	mutex_unlock(&mce_sysfs_mutex);

	return size;
}

static ssize_t set_cmci_disabled(struct device *s,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	mutex_lock(&mce_sysfs_mutex);
	if (mca_cfg.cmci_disabled ^ !!new) {
		if (new) {
			/* disable cmci */
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mca_cfg.cmci_disabled = true;
		} else {
			/* enable cmci */
			mca_cfg.cmci_disabled = false;
			on_each_cpu(mce_enable_ce, NULL, 1);
		}
	}
	mutex_unlock(&mce_sysfs_mutex);

	return size;
}

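/* sysfs: update check_interval (in seconds, clamped to >= 1) and restart the timers. */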
static ssize_t store_int_with_restart(struct device *s,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	unsigned long old_check_interval = check_interval;
	ssize_t ret = device_store_ulong(s, attr, buf, size);

	if (check_interval == old_check_interval)
		return ret;

	if (check_interval < 1)
		check_interval = 1;

	mutex_lock(&mce_sysfs_mutex);
	mce_restart();
	mutex_unlock(&mce_sysfs_mutex);

	return ret;
}

static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);

static struct dev_ext_attribute dev_attr_check_interval = {
	__ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
	&check_interval
};

static struct dev_ext_attribute dev_attr_ignore_ce = {
	__ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
	&mca_cfg.ignore_ce
};

static struct dev_ext_attribute dev_attr_cmci_disabled = {
	__ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
	&mca_cfg.cmci_disabled
};

static struct device_attribute *mce_device_attrs[] = {
	&dev_attr_tolerant.attr,
	&dev_attr_check_interval.attr,
#ifdef CONFIG_X86_MCELOG_LEGACY
	&dev_attr_trigger,
#endif
	&dev_attr_monarch_timeout.attr,
	&dev_attr_dont_log_ce.attr,
	&dev_attr_ignore_ce.attr,
	&dev_attr_cmci_disabled.attr,
	NULL
};

static cpumask_var_t mce_device_initialized;

static void mce_device_release(struct device *dev)
{
	kfree(dev);
}

/* Per-CPU device init. All of the CPUs still share the same ctrl bank: */
static int mce_device_create(unsigned int cpu)
{
	struct device *dev;
	int err;
	int i, j;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	dev = per_cpu(mce_device, cpu);
	if (dev)
		return 0;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->id  = cpu;
	dev->bus = &mce_subsys;
	dev->release = &mce_device_release;

	err = device_register(dev);
	if (err) {
		put_device(dev);
		return err;
	}

	for (i = 0; mce_device_attrs[i]; i++) {
		err = device_create_file(dev, mce_device_attrs[i]);
		if (err)
			goto error;
	}
	for (j = 0; j < mca_cfg.banks; j++) {
		err = device_create_file(dev, &mce_banks[j].attr);
		if (err)
			goto error2;
	}
	cpumask_set_cpu(cpu, mce_device_initialized);
	per_cpu(mce_device, cpu) = dev;

	return 0;
error2:
	while (--j >= 0)
		device_remove_file(dev, &mce_banks[j].attr);
error:
	while (--i >= 0)
		device_remove_file(dev, mce_device_attrs[i]);

	device_unregister(dev);

	return err;
}

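/* Tear down the per-CPU sysfs device created by mce_device_create(). */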
static void mce_device_remove(unsigned int cpu)
{
	struct device *dev = per_cpu(mce_device, cpu);
	int i;

	if (!cpumask_test_cpu(cpu, mce_device_initialized))
		return;

	for (i = 0; mce_device_attrs[i]; i++)
		device_remove_file(dev, mce_device_attrs[i]);

	for (i = 0; i < mca_cfg.banks; i++)
		device_remove_file(dev, &mce_banks[i].attr);

	device_unregister(dev);
	cpumask_clear_cpu(cpu, mce_device_initialized);
	per_cpu(mce_device, cpu) = NULL;
}

/* Make sure there are no machine checks on offlined CPUs. */
static void mce_disable_cpu(void)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;

	if (!cpuhp_tasks_frozen)
		cmci_clear();

	vendor_disable_error_reporting();
}

static void mce_reenable_cpu(void)
{
	int i;

	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;

	if (!cpuhp_tasks_frozen)
		cmci_reenable();
	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(msr_ops.ctl(i), b->ctl);
	}
}

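/* CPU hotplug callback: let the Intel code update CMCI state for the dead CPU. */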
static int mce_cpu_dead(unsigned int cpu)
{
	mce_intel_hcpu_update(cpu);

	/* intentionally ignoring frozen here */
	if (!cpuhp_tasks_frozen)
		cmci_rediscover();
	return 0;
}

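/* CPU hotplug callback: set up sysfs devices, re-enable banks and start the timer. */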
static int mce_cpu_online(unsigned int cpu)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	int ret;

	mce_device_create(cpu);

	ret = mce_threshold_create_device(cpu);
	if (ret) {
		mce_device_remove(cpu);
		return ret;
	}
	mce_reenable_cpu();
	mce_start_timer(t);
	return 0;
}

static int mce_cpu_pre_down(unsigned int cpu)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);

	mce_disable_cpu();
	del_timer_sync(t);
	mce_threshold_remove_device(cpu);
	mce_device_remove(cpu);
	return 0;
}

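/* Create one sysfs attribute ("bank0" ... "bankN") per MCA bank. */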
static __init void mce_init_banks(void)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];
		struct device_attribute *a = &b->attr;

		sysfs_attr_init(&a->attr);
		a->attr.name	= b->attrname;
		snprintf(b->attrname, ATTR_LEN, "bank%d", i);

		a->attr.mode	= 0644;
		a->show		= show_bank;
		a->store	= set_bank;
	}
}

static __init int mcheck_init_device(void)
{
	int err;

	/*
	 * Check if we have a spare virtual bit. This will only become
	 * a problem if/when we move beyond 5-level page tables.
	 */
	MAYBE_BUILD_BUG_ON(__VIRTUAL_MASK_SHIFT >= 63);

	if (!mce_available(&boot_cpu_data)) {
		err = -EIO;
		goto err_out;
	}

	if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
		err = -ENOMEM;
		goto err_out;
	}

	mce_init_banks();

	err = subsys_system_register(&mce_subsys, NULL);
	if (err)
		goto err_out_mem;

	err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL,
				mce_cpu_dead);
	if (err)
		goto err_out_mem;

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
				mce_cpu_online, mce_cpu_pre_down);
	if (err < 0)
		goto err_out_online;

	register_syscore_ops(&mce_syscore_ops);

	return 0;

err_out_online:
	cpuhp_remove_state(CPUHP_X86_MCE_DEAD);

err_out_mem:
	free_cpumask_var(mce_device_initialized);

err_out:
	pr_err("Unable to init MCE device (rc: %d)\n", err);
	return err;
}
device_initcall_sync(mcheck_init_device);
/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
	mca_cfg.disabled = 1;
	return 1;
}
__setup("nomce", mcheck_disable);

#ifdef CONFIG_DEBUG_FS
struct dentry *mce_get_debugfs_dir(void)
{
	static struct dentry *dmce;

	if (!dmce)
		dmce = debugfs_create_dir("mce", NULL);
	return dmce;
}

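/* Reset the global MCE rendezvous state so another (fake) panic can run. */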
static void mce_reset(void)
{
	cpu_missing = 0;
	atomic_set(&mce_fake_panicked, 0);
	atomic_set(&mce_executing, 0);
	atomic_set(&mce_callin, 0);
	atomic_set(&global_nwo, 0);
}

static int fake_panic_get(void *data, u64 *val)
{
	*val = fake_panic;
	return 0;
}

static int fake_panic_set(void *data, u64 val)
{
	mce_reset();
	fake_panic = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
			fake_panic_set, "%llu\n");
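/*
 * Example (illustrative): writing 1 to <debugfs>/mce/fake_panic makes a
 * subsequent MCE panic be simulated (logged only) rather than real.
 */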

static int __init mcheck_debugfs_init(void)
{
	struct dentry *dmce, *ffake_panic;

	dmce = mce_get_debugfs_dir();
	if (!dmce)
		return -ENOMEM;
	ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
					  &fake_panic_fops);
	if (!ffake_panic)
		return -ENOMEM;

	return 0;
}
#else
static int __init mcheck_debugfs_init(void) { return -EINVAL; }
#endif

DEFINE_STATIC_KEY_FALSE(mcsafe_key);
EXPORT_SYMBOL_GPL(mcsafe_key);

static int __init mcheck_late_init(void)
{
	if (mca_cfg.recovery)
		static_branch_inc(&mcsafe_key);

	mcheck_debugfs_init();
	cec_init();

	/*
	 * Flush out everything that has been logged during early boot, now that
	 * everything has been initialized (workqueues, decoders, ...).
	 */
	mce_schedule_work();

	return 0;
}
late_initcall(mcheck_late_init);