/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/ras.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>
#include <linux/jump_label.h>

#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/reboot.h>
#include <asm/set_memory.h>

#include "mce-internal.h"

static DEFINE_MUTEX(mce_log_mutex);

/* sysfs synchronization */
static DEFINE_MUTEX(mce_sysfs_mutex);

#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

#define SPINUNIT		100	/* 100ns */

DEFINE_PER_CPU(unsigned, mce_exception_count);

struct mce_bank *mce_banks __read_mostly;
struct mce_vendor_flags mce_flags __read_mostly;

struct mca_config mca_cfg __read_mostly = {
	.bootlog  = -1,
	/*
	 * Tolerant levels:
	 * 0: always panic on uncorrected errors, log corrected errors
	 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
	 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
	 * 3: never panic or SIGBUS, log all errors (for testing only)
	 */
	.tolerant = 1,
	.monarch_timeout = -1
};

static DEFINE_PER_CPU(struct mce, mces_seen);
static unsigned long mce_need_notify;
static int cpu_missing;

/*
 * MCA banks polled by the periodic polling timer for corrected events.
 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
 */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

/*
 * MCA banks controlled through firmware first for corrected errors.
 * This is a global list of banks for which we won't enable CMCI and we
 * won't poll. Firmware controls these banks and is responsible for
 * reporting corrected errors through GHES. Uncorrected/recoverable
 * errors are still notified through a machine check.
 */
mce_banks_t mce_banks_ce_disabled;

static struct work_struct mce_work;
static struct irq_work mce_irq_work;

static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);

#ifndef mce_unmap_kpfn
static void mce_unmap_kpfn(unsigned long pfn);
#endif

/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);

/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	/* need the internal __ version to avoid deadlocks */
	m->time = __ktime_get_real_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
	m->socketid = cpu_data(m->extcpu).phys_proc_id;
	m->apicid = cpu_data(m->extcpu).initial_apicid;
	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);

	if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
		rdmsrl(MSR_PPIN, m->ppin);

	m->microcode = boot_cpu_data.microcode;
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

void mce_log(struct mce *m)
{
	if (!mce_gen_pool_add(m))
		irq_work_queue(&mce_irq_work);
}

void mce_inject_log(struct mce *m)
{
	mutex_lock(&mce_log_mutex);
	mce_log(m);
	mutex_unlock(&mce_log_mutex);
}
EXPORT_SYMBOL_GPL(mce_inject_log);

static struct notifier_block mce_srao_nb;

/*
 * We run the default notifier only if the SRAO, the first, and the default
 * notifiers are the only ones registered, i.e. exactly the mandatory
 * NUM_DEFAULT_NOTIFIERS notifiers are on the chain.
 */
#define NUM_DEFAULT_NOTIFIERS	3
static atomic_t num_notifiers;

void mce_register_decode_chain(struct notifier_block *nb)
{
	if (WARN_ON(nb->priority > MCE_PRIO_MCELOG && nb->priority < MCE_PRIO_EDAC))
		return;

	atomic_inc(&num_notifiers);

	blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_register_decode_chain);

void mce_unregister_decode_chain(struct notifier_block *nb)
{
	atomic_dec(&num_notifiers);

	blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
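
/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * decoder, e.g. in an EDAC driver, would hook the chain like this:
 *
 *	static int my_decoder_cb(struct notifier_block *nb,
 *				 unsigned long val, void *data)
 *	{
 *		struct mce *m = data;
 *
 *		// decode and pretty-print *m here
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_decoder_nb = {
 *		.notifier_call	= my_decoder_cb,
 *		.priority	= MCE_PRIO_EDAC,
 *	};
 *
 *	mce_register_decode_chain(&my_decoder_nb);
 */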

static inline u32 ctl_reg(int bank)
{
	return MSR_IA32_MCx_CTL(bank);
}

static inline u32 status_reg(int bank)
{
	return MSR_IA32_MCx_STATUS(bank);
}

static inline u32 addr_reg(int bank)
{
	return MSR_IA32_MCx_ADDR(bank);
}

static inline u32 misc_reg(int bank)
{
	return MSR_IA32_MCx_MISC(bank);
}

static inline u32 smca_ctl_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_CTL(bank);
}

static inline u32 smca_status_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_STATUS(bank);
}

static inline u32 smca_addr_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_ADDR(bank);
}

static inline u32 smca_misc_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_MISC(bank);
}

struct mca_msr_regs msr_ops = {
	.ctl	= ctl_reg,
	.status	= status_reg,
	.addr	= addr_reg,
	.misc	= misc_reg
};

static void __print_mce(struct mce *m)
{
	pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
		 m->extcpu,
		 (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""),
		 m->mcgstatus, m->bank, m->status);

	if (m->ip) {
		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
			m->cs, m->ip);

		if (m->cs == __KERNEL_CS)
			pr_cont("{%pS}", (void *)(unsigned long)m->ip);
		pr_cont("\n");
	}

	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
	if (m->addr)
		pr_cont("ADDR %llx ", m->addr);
	if (m->misc)
		pr_cont("MISC %llx ", m->misc);

	if (mce_flags.smca) {
		if (m->synd)
			pr_cont("SYND %llx ", m->synd);
		if (m->ipid)
			pr_cont("IPID %llx ", m->ipid);
	}

	pr_cont("\n");
	/*
	 * Note this output is parsed by external tools and old fields
	 * should not be changed.
	 */
	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
		m->microcode);
}

static void print_mce(struct mce *m)
{
	__print_mce(m);

	if (m->cpuvendor != X86_VENDOR_AMD)
		pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}

#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_panicked;

static int fake_panic;
static atomic_t mce_fake_panicked;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

	preempt_disable();
	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	if (panic_timeout == 0)
		panic_timeout = mca_cfg.panic_timeout;
	panic("Panicking: machine check CPU died");
}

static void mce_panic(const char *msg, struct mce *final, char *exp)
{
	int apei_err = 0;
	struct llist_node *pending;
	struct mce_evt_llist *l;

	if (!fake_panic) {
		/*
		 * Make sure only one CPU runs in machine check panic
		 */
		if (atomic_inc_return(&mce_panicked) > 1)
			wait_for_panic();
		barrier();

		bust_spinlocks(1);
		console_verbose();
	} else {
		/* Don't log too much for fake panic */
		if (atomic_inc_return(&mce_fake_panicked) > 1)
			return;
	}
	pending = mce_gen_pool_prepare_records();
	/* First print corrected ones that are still unlogged */
	llist_for_each_entry(l, pending, llnode) {
		struct mce *m = &l->mce;
		if (!(m->status & MCI_STATUS_UC)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	/* Now print uncorrected but with the final one last */
	llist_for_each_entry(l, pending, llnode) {
		struct mce *m = &l->mce;
		if (!(m->status & MCI_STATUS_UC))
			continue;
		if (!final || mce_cmp(m, final)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	if (final) {
		print_mce(final);
		if (!apei_err)
			apei_err = apei_write_mce(final);
	}
	if (cpu_missing)
		pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
	if (exp)
		pr_emerg(HW_ERR "Machine check: %s\n", exp);
	if (!fake_panic) {
		if (panic_timeout == 0)
			panic_timeout = mca_cfg.panic_timeout;
		panic(msg);
	} else
		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
}
/* Support code for software error injection */
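/*
 * When injectm.finished is set on a CPU, the mce_rdmsrl()/mce_wrmsrl()
 * wrappers below redirect the access to the matching field of that CPU's
 * per-CPU "injectm" record instead of touching the real MSR, so the normal
 * handler paths can be exercised by the software injector.
 */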

static int msr_to_offset(u32 msr)
{
	unsigned bank = __this_cpu_read(injectm.bank);

	if (msr == mca_cfg.rip_msr)
		return offsetof(struct mce, ip);
	if (msr == msr_ops.status(bank))
		return offsetof(struct mce, status);
	if (msr == msr_ops.addr(bank))
		return offsetof(struct mce, addr);
	if (msr == msr_ops.misc(bank))
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}

/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
	u64 v;

	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset < 0)
			return 0;
		return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
	}

	if (rdmsrl_safe(msr, &v)) {
		WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
		/*
		 * Return zero in case the access faulted. This should
		 * not happen normally but can happen if the CPU does
		 * something weird, or if the code is buggy.
		 */
		v = 0;
	}

	return v;
}

static void mce_wrmsrl(u32 msr, u64 v)
{
	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset >= 0)
			*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
		return;
	}
	wrmsrl(msr, v);
}

/*
 * Collect all global (w.r.t. this processor) status about this machine
 * check into our "mce" struct so that we can use it later to assess
 * the severity of the problem as we read per-bank specific details.
 */
static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
	mce_setup(m);

	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	if (regs) {
		/*
		 * Get the address of the instruction at the time of
		 * the machine check error.
		 */
		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
			m->ip = regs->ip;
			m->cs = regs->cs;

			/*
			 * When in VM86 mode make the cs look like ring 3
			 * always. This is a lie, but it's better than passing
			 * the additional vm86 bit around everywhere.
			 */
			if (v8086_mode(regs))
				m->cs |= 3;
		}
		/* Use accurate RIP reporting if available. */
		if (mca_cfg.rip_msr)
			m->ip = mce_rdmsrl(mca_cfg.rip_msr);
	}
}

int mce_available(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
	if (!mce_gen_pool_empty())
		schedule_work(&mce_work);
}

static void mce_irq_work_cb(struct irq_work *entry)
{
	mce_schedule_work();
}

static void mce_report_event(struct pt_regs *regs)
{
	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
		mce_notify_irq();
		/*
		 * Triggering the work queue here is just an insurance
		 * policy in case the syscall exit notify handler
		 * doesn't run soon enough or ends up running on the
		 * wrong CPU (can happen when audit sleeps)
		 */
		mce_schedule_work();
		return;
	}

	irq_work_queue(&mce_irq_work);
}

/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
static int mce_usable_address(struct mce *m)
{
	if (!(m->status & MCI_STATUS_ADDRV))
		return 0;

	/* Checks after this one are Intel-specific: */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 1;

	if (!(m->status & MCI_STATUS_MISCV))
		return 0;

	if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
		return 0;

	if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
		return 0;

	return 1;
}

bool mce_is_memory_error(struct mce *m)
{
	if (m->cpuvendor == X86_VENDOR_AMD) {
		return amd_mce_is_memory_error(m);

	} else if (m->cpuvendor == X86_VENDOR_INTEL) {
		/*
		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
		 *
		 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
		 * indicating a memory error. Bit 8 is used for indicating a
		 * cache hierarchy error. The combination of bit 2 and bit 3
		 * is used for indicating a `generic' cache hierarchy error
		 * But we can't just blindly check the above bits, because if
		 * bit 11 is set, then it is a bus/interconnect error - and
		 * either way the above bits just gives more detail on what
		 * bus/interconnect error happened. Note that bit 12 can be
		 * ignored, as it's the "filter" bit.
		 */
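		/*
		 * E.g. (illustrative): an MCACOD of 0x0090, a memory read
		 * error, matches the first mask:
		 * (0x0090 & 0xef80) == 0x0080 == BIT(7).
		 */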
		return (m->status & 0xef80) == BIT(7) ||
		       (m->status & 0xef00) == BIT(8) ||
		       (m->status & 0xeffc) == 0xc;
	}

	return false;
}
EXPORT_SYMBOL_GPL(mce_is_memory_error);

static bool mce_is_correctable(struct mce *m)
{
	if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
		return false;

	if (m->status & MCI_STATUS_UC)
		return false;

	return true;
}

static bool cec_add_mce(struct mce *m)
{
	if (!m)
		return false;

	/* We eat only correctable DRAM errors with usable addresses. */
	if (mce_is_memory_error(m) &&
	    mce_is_correctable(m)  &&
	    mce_usable_address(m))
		if (!cec_add_elem(m->addr >> PAGE_SHIFT))
			return true;

	return false;
}

static int mce_first_notifier(struct notifier_block *nb, unsigned long val,
			      void *data)
{
	struct mce *m = (struct mce *)data;

	if (!m)
		return NOTIFY_DONE;

	if (cec_add_mce(m))
		return NOTIFY_STOP;

	/* Emit the trace record: */
	trace_mce_record(m);

	set_bit(0, &mce_need_notify);

	mce_notify_irq();

	return NOTIFY_DONE;
}

static struct notifier_block first_nb = {
	.notifier_call	= mce_first_notifier,
	.priority	= MCE_PRIO_FIRST,
};

static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct mce *mce = (struct mce *)data;
	unsigned long pfn;

	if (!mce)
		return NOTIFY_DONE;

	if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
		pfn = mce->addr >> PAGE_SHIFT;
		if (!memory_failure(pfn, 0))
			mce_unmap_kpfn(pfn);
	}

	return NOTIFY_OK;
}
static struct notifier_block mce_srao_nb = {
	.notifier_call	= srao_decode_notifier,
	.priority	= MCE_PRIO_SRAO,
};

static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct mce *m = (struct mce *)data;

	if (!m)
		return NOTIFY_DONE;

	if (atomic_read(&num_notifiers) > NUM_DEFAULT_NOTIFIERS)
		return NOTIFY_DONE;

	__print_mce(m);

	return NOTIFY_DONE;
}

static struct notifier_block mce_default_nb = {
	.notifier_call	= mce_default_notifier,
	/* lowest prio, we want it to run last. */
	.priority	= MCE_PRIO_LOWEST,
};

/*
 * Read ADDR and MISC registers.
 */
static void mce_read_aux(struct mce *m, int i)
{
	if (m->status & MCI_STATUS_MISCV)
		m->misc = mce_rdmsrl(msr_ops.misc(i));

	if (m->status & MCI_STATUS_ADDRV) {
		m->addr = mce_rdmsrl(msr_ops.addr(i));

		/*
		 * Mask the reported address by the reported granularity.
		 */
		if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
			u8 shift = MCI_MISC_ADDR_LSB(m->misc);
			m->addr >>= shift;
			m->addr <<= shift;
		}

		/*
		 * Extract [55:<lsb>] where lsb is the least significant
		 * *valid* bit of the address bits.
		 */
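		/*
		 * E.g. (illustrative): an lsb field of 12 keeps bits [55:12],
		 * i.e. a 4K-page-aligned physical address.
		 */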
		if (mce_flags.smca) {
			u8 lsb = (m->addr >> 56) & 0x3f;

			m->addr &= GENMASK_ULL(55, lsb);
		}
	}

	if (mce_flags.smca) {
		m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i));

		if (m->status & MCI_STATUS_SYNDV)
			m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i));
	}
}

DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: the spec recommends panicking for fatal unsignalled
 * errors here. However, this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between the exception handler
 * and the poll handler -- so we skip this for now.
 * These cases should not happen anyway, or only when the CPU
 * is already totally confused. In that case it's likely it will
 * not fully execute the machine check handler either.
 */
bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	bool error_seen = false;
	struct mce m;
	int i;

	this_cpu_inc(mce_poll_count);

	mce_gather_info(&m, NULL);

	if (flags & MCP_TIMESTAMP)
		m.tsc = rdtsc();

	for (i = 0; i < mca_cfg.banks; i++) {
		if (!mce_banks[i].ctl || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		barrier();
		m.status = mce_rdmsrl(msr_ops.status(i));
		if (!(m.status & MCI_STATUS_VAL))
			continue;

		/*
		 * Uncorrected or signalled events are handled by the exception
		 * handler when it is enabled, so don't process those here.
		 *
		 * TBD do the same check for MCI_STATUS_EN here?
		 */
		if (!(flags & MCP_UC) &&
		    (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
			continue;

		error_seen = true;

		mce_read_aux(&m, i);

		m.severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);

		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */
		if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
			mce_log(&m);
		else if (mce_usable_address(&m)) {
			/*
			 * Although we skipped logging this, we still want
			 * to take action. Add to the pool so the registered
			 * notifiers will see it.
			 */
			if (!mce_gen_pool_add(&m))
				mce_schedule_work();
		}

		/*
		 * Clear state for this bank.
		 */
		mce_wrmsrl(msr_ops.status(i), 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */

	sync_core();

	return error_seen;
}
EXPORT_SYMBOL_GPL(machine_check_poll);

/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
			  struct pt_regs *regs)
{
	char *tmp;
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		m->status = mce_rdmsrl(msr_ops.status(i));
		if (!(m->status & MCI_STATUS_VAL))
			continue;

		__set_bit(i, validp);
		if (quirk_no_way_out)
			quirk_no_way_out(i, m, regs);

		if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
			mce_read_aux(m, i);
			*msg = tmp;
			return 1;
		}
	}
	return 0;
}

/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until mce_executing equals its number.
 */
static atomic_t mce_executing;

/*
 * Defines order of CPUs on entry. First CPU becomes Monarch.
 */
static atomic_t mce_callin;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t, const char *msg)
{
	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that system_state
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_panicked))
		wait_for_panic();
	if (!mca_cfg.monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		if (mca_cfg.tolerant <= 1)
			mce_panic(msg, NULL, NULL);
		cpu_missing = 1;
		return 1;
	}
	*t -= SPINUNIT;
out:
	touch_nmi_watchdog();
	return 0;
}

/*
 * The Monarch's reign.  The Monarch is the CPU who entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. When any
 * error is fatal, panic. Only then let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable case
 * and also make sure that all CPUs' errors are examined.
 *
 * Also this detects the case of a machine check event coming from outer
 * space (not detected by any CPUs). In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a semi-stable
 * state and won't corrupt anything by itself. It's ok to let the others
 * continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;
	char *nmsg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		int severity = mce_severity(&per_cpu(mces_seen, cpu),
					    mca_cfg.tolerant,
					    &nmsg, true);
		if (severity > global_worst) {
			msg = nmsg;
			global_worst = severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal machine check", m, msg);

	/*
	 * For UC somewhere we let the CPU who detects it handle it.
	 * Also must let continue the others, otherwise the handling
	 * CPU could deadlock on a lock.
	 */

	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}

static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires panic. Then it executes them
 * in the entry order.
 * TBD double check parallel CPU hotunplug
 */
static int mce_start(int *no_way_out)
{
	int order;
	int cpus = num_online_cpus();
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		return -1;

	atomic_add(*no_way_out, &global_nwo);
	/*
	 * Rely on the implied barrier below, such that global_nwo
	 * is updated before mce_callin.
	 */
	order = atomic_inc_return(&mce_callin);

	/*
	 * Wait for everyone.
	 */
	while (atomic_read(&mce_callin) != cpus) {
		if (mce_timed_out(&timeout,
				  "Timeout: Not all CPUs entered broadcast exception handler")) {
			atomic_set(&global_nwo, 0);
			return -1;
		}
		ndelay(SPINUNIT);
	}

	/*
	 * mce_callin should be read before global_nwo
	 */
	smp_rmb();

	if (order == 1) {
		/*
		 * Monarch: Starts executing now, the others wait.
		 */
		atomic_set(&mce_executing, 1);
	} else {
		/*
		 * Subject: Now start the scanning loop one by one in
		 * the original callin order.
		 * This way when there are any shared banks it will be
		 * only seen by one CPU before cleared, avoiding duplicates.
		 */
		while (atomic_read(&mce_executing) < order) {
			if (mce_timed_out(&timeout,
					  "Timeout: Subject CPUs unable to finish machine check processing")) {
				atomic_set(&global_nwo, 0);
				return -1;
			}
			ndelay(SPINUNIT);
		}
	}

	/*
	 * Cache the global no_way_out state.
	 */
	*no_way_out = atomic_read(&global_nwo);

	return order;
}

/*
 * Synchronize between CPUs after main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static int mce_end(int order)
{
	int ret = -1;
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		goto reset;
	if (order < 0)
		goto reset;

	/*
	 * Allow others to run.
	 */
	atomic_inc(&mce_executing);

	if (order == 1) {
		/* CHECKME: Can this race with a parallel hotplug? */
		int cpus = num_online_cpus();

		/*
		 * Monarch: Wait for everyone to go through their scanning
		 * loops.
		 */
		while (atomic_read(&mce_executing) <= cpus) {
			if (mce_timed_out(&timeout,
					  "Timeout: Monarch CPU unable to finish machine check processing"))
				goto reset;
			ndelay(SPINUNIT);
		}

		mce_reign();
		barrier();
		ret = 0;
	} else {
		/*
		 * Subject: Wait for Monarch to finish.
		 */
		while (atomic_read(&mce_executing) != 0) {
			if (mce_timed_out(&timeout,
					  "Timeout: Monarch CPU did not finish machine check processing"))
				goto reset;
			ndelay(SPINUNIT);
		}

		/*
		 * Don't reset anything. That's done by the Monarch.
		 */
		return 0;
	}

	/*
	 * Reset all global state.
	 */
reset:
	atomic_set(&global_nwo, 0);
	atomic_set(&mce_callin, 0);
	barrier();

	/*
	 * Let others run again.
	 */
	atomic_set(&mce_executing, 0);
	return ret;
}

static void mce_clear_state(unsigned long *toclear)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		if (test_bit(i, toclear))
			mce_wrmsrl(msr_ops.status(i), 0);
	}
}

static int do_memory_failure(struct mce *m)
{
	int flags = MF_ACTION_REQUIRED;
	int ret;

	pr_err("Uncorrected hardware memory error in user-access at %llx", m->addr);
	if (!(m->mcgstatus & MCG_STATUS_RIPV))
		flags |= MF_MUST_KILL;
	ret = memory_failure(m->addr >> PAGE_SHIFT, flags);
	if (ret)
		pr_err("Memory error not recovered");
	else
		mce_unmap_kpfn(m->addr >> PAGE_SHIFT);
	return ret;
}

#ifndef mce_unmap_kpfn
static void mce_unmap_kpfn(unsigned long pfn)
{
	unsigned long decoy_addr;

	/*
	 * Unmap this page from the kernel 1:1 mappings to make sure
	 * we don't log more errors because of speculative access to
	 * the page.
	 * We would like to just call:
	 *	set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1);
	 * but doing that would radically increase the odds of a
	 * speculative access to the poison page because we'd have
	 * the virtual address of the kernel 1:1 mapping sitting
	 * around in registers.
	 * Instead we get tricky.  We create a non-canonical address
	 * that looks just like the one we want, but has bit 63 flipped.
	 * This relies on set_memory_np() not checking whether we passed
	 * a legal address.
	 */

	decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
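	/*
	 * E.g. (illustrative, assuming the non-KASLR 4-level PAGE_OFFSET of
	 * 0xffff880000000000): pfn 0x12345 maps 1:1 to 0xffff880012345000,
	 * so the decoy computed above is 0x7fff880012345000 -- the same
	 * page-table walk, but through a non-canonical address.
	 */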

	if (set_memory_np(decoy_addr, 1))
		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
}
#endif


/*
 * Cases where we avoid rendezvous handler timeout:
 * 1) If this CPU is offline.
 *
 * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
 *  skip those CPUs which remain looping in the 1st kernel - see
 *  crash_nmi_callback().
 *
 * Note: there still is a small window between kexec-ing and the new,
 * kdump kernel establishing a new #MC handler where a broadcasted MCE
 * might not get handled properly.
 */
static bool __mc_check_crashing_cpu(int cpu)
{
	if (cpu_is_offline(cpu) ||
	    (crashing_cpu != -1 && crashing_cpu != cpu)) {
		u64 mcgstatus;

		mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
		if (mcgstatus & MCG_STATUS_RIPV) {
			mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
			return true;
		}
	}
	return false;
}

static void __mc_scan_banks(struct mce *m, struct mce *final,
			    unsigned long *toclear, unsigned long *valid_banks,
			    int no_way_out, int *worst)
{
	struct mca_config *cfg = &mca_cfg;
	int severity, i;

	for (i = 0; i < cfg->banks; i++) {
		__clear_bit(i, toclear);
		if (!test_bit(i, valid_banks))
			continue;
		if (!mce_banks[i].ctl)
			continue;

		m->misc = 0;
		m->addr = 0;
		m->bank = i;

		m->status = mce_rdmsrl(msr_ops.status(i));
		if ((m->status & MCI_STATUS_VAL) == 0)
			continue;

		/*
		 * Non-uncorrected or non-signaled errors are handled by
		 * machine_check_poll(). Leave them alone, unless this panics.
		 */
		if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
			!no_way_out)
			continue;

		/*
		 * Set taint even when machine check was not enabled.
		 */
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

		severity = mce_severity(m, cfg->tolerant, NULL, true);

		/*
		 * When the machine check was for a corrected/deferred error,
		 * don't touch it here, unless we're panicking.
		 */
		if ((severity == MCE_KEEP_SEVERITY ||
		     severity == MCE_UCNA_SEVERITY) && !no_way_out)
			continue;
		__set_bit(i, toclear);
		if (severity == MCE_NO_SEVERITY) {
			/*
			 * Machine check event was not enabled. Clear, but
			 * ignore.
			 */
			continue;
		}

		mce_read_aux(m, i);

		/* assuming valid severity level != 0 */
		m->severity = severity;

		mce_log(m);

		if (severity > *worst) {
			*final = *m;
			*worst = severity;
		}
	}

	/* mce_clear_state will clear *final, save locally for use later */
	*m = *final;
}

/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context not subject to normal locking rules. This
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 *
 * On Intel systems this is entered on all CPUs in parallel through
 * MCE broadcast. However some CPUs might be broken beyond repair,
 * so be always careful when synchronizing with others.
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
	struct mca_config *cfg = &mca_cfg;
	int cpu = smp_processor_id();
	char *msg = "Unknown";
	struct mce m, *final;
	int worst = 0;

	/*
	 * Establish sequential order between the CPUs entering the machine
	 * check handler.
	 */
	int order = -1;

	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE.  If mca_cfg.tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;

	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;

	/*
	 * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
	 * on Intel.
	 */
	int lmce = 1;

	if (__mc_check_crashing_cpu(cpu))
		return;

	ist_enter(regs);

	this_cpu_inc(mce_exception_count);

	mce_gather_info(&m, regs);
	m.tsc = rdtsc();

	final = this_cpu_ptr(&mces_seen);
	*final = m;

	memset(valid_banks, 0, sizeof(valid_banks));
	no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);

	barrier();

	/*
	 * If there is no restart IP we may need to kill the process or
	 * panic. Assume the worst for now, but if we find that the
	 * severity is MCE_AR_SEVERITY we have other options.
	 */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		kill_it = 1;

	/*
	 * Check if this MCE is signaled to only this logical processor,
	 * on Intel only.
	 */
	if (m.cpuvendor == X86_VENDOR_INTEL)
		lmce = m.mcgstatus & MCG_STATUS_LMCES;

	/*
	 * Local machine check may already know that we have to panic.
	 * Broadcast machine check begins rendezvous in mce_start().
	 * Go through all banks in exclusion of the other CPUs. This way we
	 * don't report duplicated events on shared banks because the first one
	 * to see it will clear it.
	 */
	if (lmce) {
		if (no_way_out)
			mce_panic("Fatal local machine check", &m, msg);
	} else {
		order = mce_start(&no_way_out);
	}

	__mc_scan_banks(&m, final, toclear, valid_banks, no_way_out, &worst);

	if (!no_way_out)
		mce_clear_state(toclear);

	/*
	 * Do most of the synchronization with other CPUs.
	 * When there's any problem use only local no_way_out state.
	 */
	if (!lmce) {
		if (mce_end(order) < 0)
			no_way_out = worst >= MCE_PANIC_SEVERITY;
	} else {
		/*
		 * If there was a fatal machine check we should have
		 * already called mce_panic earlier in this function.
		 * Since we re-read the banks, we might have found
		 * something new. Check again to see if we found a
		 * fatal error. We call "mce_severity()" again to
		 * make sure we have the right "msg".
		 */
		if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
			mce_severity(&m, cfg->tolerant, &msg, true);
			mce_panic("Local fatal machine check!", &m, msg);
		}
	}

	/*
	 * If tolerant is at an insane level we drop requests to kill
	 * processes and continue even when there is no way out.
	 */
	if (cfg->tolerant == 3)
		kill_it = 0;
	else if (no_way_out)
		mce_panic("Fatal machine check on current CPU", &m, msg);

	if (worst > 0)
		mce_report_event(regs);
	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);

	sync_core();

	if (worst != MCE_AR_SEVERITY && !kill_it)
		goto out_ist;

	/* Fault was in user mode and we need to take some action */
	if ((m.cs & 3) == 3) {
		ist_begin_non_atomic(regs);
		local_irq_enable();

		if (kill_it || do_memory_failure(&m))
			force_sig(SIGBUS, current);
		local_irq_disable();
		ist_end_non_atomic();
	} else {
		if (!fixup_exception(regs, X86_TRAP_MC))
			mce_panic("Failed kernel mode recovery", &m, NULL);
	}

out_ist:
	ist_exit(regs);
}
EXPORT_SYMBOL_GPL(do_machine_check);

#ifndef CONFIG_MEMORY_FAILURE
int memory_failure(unsigned long pfn, int flags)
{
	/* mce_severity() should not hand us an ACTION_REQUIRED error */
	BUG_ON(flags & MF_ACTION_REQUIRED);
	pr_err("Uncorrected memory error in page 0x%lx ignored\n"
	       "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
	       pfn);

	return 0;
}
#endif

/*
 * Periodic polling timer for "silent" machine check errors.  If the
 * poller finds an MCE, poll 2x faster.  When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static unsigned long check_interval = INITIAL_CHECK_INTERVAL;

static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static unsigned long mce_adjust_timer_default(unsigned long interval)
{
	return interval;
}

static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;

static void __start_timer(struct timer_list *t, unsigned long interval)
{
	unsigned long when = jiffies + interval;
	unsigned long flags;

	local_irq_save(flags);

	if (!timer_pending(t) || time_before(when, t->expires))
		mod_timer(t, round_jiffies(when));

	local_irq_restore(flags);
}

static void mce_timer_fn(struct timer_list *t)
{
	struct timer_list *cpu_t = this_cpu_ptr(&mce_timer);
	unsigned long iv;

	WARN_ON(cpu_t != t);

	iv = __this_cpu_read(mce_next_interval);

	if (mce_available(this_cpu_ptr(&cpu_info))) {
		machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));

		if (mce_intel_cmci_poll()) {
			iv = mce_adjust_timer(iv);
			goto done;
		}
	}

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the polling
	 * interval, otherwise increase the polling interval.
	 */
	if (mce_notify_irq())
		iv = max(iv / 2, (unsigned long) HZ/100);
	else
		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));

done:
	__this_cpu_write(mce_next_interval, iv);
	__start_timer(t, iv);
}

/*
 * Ensure that the timer is firing in @interval from now.
 */
void mce_timer_kick(unsigned long interval)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	unsigned long iv = __this_cpu_read(mce_next_interval);

	__start_timer(t, interval);

	if (interval < iv)
		__this_cpu_write(mce_next_interval, interval);
}

/* Must not be called in IRQ context where del_timer_sync() can deadlock */
static void mce_timer_delete_all(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		del_timer_sync(&per_cpu(mce_timer, cpu));
}

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_irq(void)
{
	/* Not more than two messages every minute */
	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

	if (test_and_clear_bit(0, &mce_need_notify)) {
		mce_work_trigger();

		if (__ratelimit(&ratelimit))
			pr_info(HW_ERR "Machine check events logged\n");

		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mce_notify_irq);

static int __mcheck_cpu_mce_banks_init(void)
{
	int i;
	u8 num_banks = mca_cfg.banks;

	mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
	if (!mce_banks)
		return -ENOMEM;

	for (i = 0; i < num_banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		b->ctl = -1ULL;
		b->init = 1;
	}
	return 0;
}

/*
 * Initialize Machine Checks for a CPU.
 */
static int __mcheck_cpu_cap_init(void)
{
	unsigned b;
	u64 cap;

	rdmsrl(MSR_IA32_MCG_CAP, cap);

	b = cap & MCG_BANKCNT_MASK;
	if (!mca_cfg.banks)
		pr_info("CPU supports %d MCE banks\n", b);

	if (b > MAX_NR_BANKS) {
		pr_warn("Using only %u machine check banks out of %u\n",
			MAX_NR_BANKS, b);
		b = MAX_NR_BANKS;
	}

	/* Don't support asymmetric configurations today */
	WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
	mca_cfg.banks = b;

	if (!mce_banks) {
		int err = __mcheck_cpu_mce_banks_init();

		if (err)
			return err;
	}

	/* Use accurate RIP reporting if available. */
	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
		mca_cfg.rip_msr = MSR_IA32_MCG_EIP;

	if (cap & MCG_SER_P)
		mca_cfg.ser = 1;

	return 0;
}

static void __mcheck_cpu_init_generic(void)
{
	enum mcp_flags m_fl = 0;
	mce_banks_t all_banks;
	u64 cap;

	if (!mca_cfg.bootlog)
		m_fl = MCP_DONTLOG;

	/*
	 * Log the machine checks left over from the previous reset.
	 */
	bitmap_fill(all_banks, MAX_NR_BANKS);
	machine_check_poll(MCP_UC | m_fl, &all_banks);

	cr4_set_bits(X86_CR4_MCE);

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
}

static void __mcheck_cpu_init_clear_banks(void)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (!b->init)
			continue;
		wrmsrl(msr_ops.ctl(i), b->ctl);
		wrmsrl(msr_ops.status(i), 0);
	}
}

/*
 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
 * Vol 3B Table 15-20). But this confuses both the code that determines
 * whether the machine check occurred in kernel or user mode, and also
 * the severity assessment code. Pretend that EIPV was set, and take the
 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
 */
static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
{
	if (bank != 0)
		return;
	if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
		return;
	if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
		          MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
			  MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
			  MCACOD)) !=
			 (MCI_STATUS_UC|MCI_STATUS_EN|
			  MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
			  MCI_STATUS_AR|MCACOD_INSTR))
		return;

	m->mcgstatus |= MCG_STATUS_EIPV;
	m->ip = regs->ip;
	m->cs = regs->cs;
}

/* Add per CPU specific workarounds here */
static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
	struct mca_config *cfg = &mca_cfg;

	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
		pr_info("unknown CPU type - not enabling MCE support\n");
		return -EOPNOTSUPP;
	}

	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15 && cfg->banks > 4) {
			/*
			 * disable GART TBL walk error reporting, which
			 * trips off incorrectly with the IOMMU & 3ware
			 * & Cerberus:
			 */
			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
		}
		if (c->x86 < 0x11 && cfg->bootlog < 0) {
			/*
			 * Lots of broken BIOS around that don't clear them
			 * by default and leave crap in there. Don't log:
			 */
			cfg->bootlog = 0;
		}
		/*
		 * Various K7s with broken bank 0 around. Always disable
		 * by default.
		 */
		if (c->x86 == 6 && cfg->banks > 0)
			mce_banks[0].ctl = 0;

		/*
		 * overflow_recov is supported for F15h Models 00h-0fh
		 * even though we don't have a CPUID bit for it.
		 */
		if (c->x86 == 0x15 && c->x86_model <= 0xf)
			mce_flags.overflow_recov = 1;

		/*
		 * Turn off MC4_MISC thresholding banks on those models since
		 * they're not supported there.
		 */
		if (c->x86 == 0x15 &&
		    (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
			int i;
			u64 hwcr;
			bool need_toggle;
			u32 msrs[] = {
				0x00000413, /* MC4_MISC0 */
				0xc0000408, /* MC4_MISC1 */
			};

			rdmsrl(MSR_K7_HWCR, hwcr);

			/* McStatusWrEn has to be set */
			need_toggle = !(hwcr & BIT(18));

			if (need_toggle)
				wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));

			/* Clear CntP bit safely */
			for (i = 0; i < ARRAY_SIZE(msrs); i++)
				msr_clear_bit(msrs[i], 62);

			/* restore old settings */
			if (need_toggle)
				wrmsrl(MSR_K7_HWCR, hwcr);
		}
	}

	if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * SDM documents that on family 6 bank 0 should not be written
		 * because it aliases to another special BIOS controlled
		 * register.
		 * But it's not aliased anymore on model 0x1a+
		 * Don't ignore bank 0 completely because there could be a
		 * valid event later, merely don't write CTL0.
		 */
		if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
			mce_banks[0].init = 0;

		/*
		 * All newer Intel systems support MCE broadcasting. Enable
		 * synchronization with a one second timeout.
		 */
		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
			cfg->monarch_timeout < 0)
			cfg->monarch_timeout = USEC_PER_SEC;

		/*
		 * There are also broken BIOSes on some Pentium M and
		 * earlier systems:
		 */
		if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
			cfg->bootlog = 0;

		if (c->x86 == 6 && c->x86_model == 45)
			quirk_no_way_out = quirk_sandybridge_ifu;
	}
	if (cfg->monarch_timeout < 0)
		cfg->monarch_timeout = 0;
	if (cfg->bootlog != 0)
		cfg->panic_timeout = 30;

	return 0;
}

static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
{
	if (c->x86 != 5)
		return 0;

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		intel_p5_mcheck_init(c);
		return 1;
	case X86_VENDOR_CENTAUR:
		winchip_mcheck_init(c);
		return 1;
	default:
		return 0;
	}

	return 0;
}

/*
 * Init basic CPU features needed for early decoding of MCEs.
 */
static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
{
	if (c->x86_vendor == X86_VENDOR_AMD) {
		mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
		mce_flags.succor	 = !!cpu_has(c, X86_FEATURE_SUCCOR);
		mce_flags.smca		 = !!cpu_has(c, X86_FEATURE_SMCA);

		if (mce_flags.smca) {
			msr_ops.ctl	= smca_ctl_reg;
			msr_ops.status	= smca_status_reg;
			msr_ops.addr	= smca_addr_reg;
			msr_ops.misc	= smca_misc_reg;
		}
	}
}

static void mce_centaur_feature_init(struct cpuinfo_x86 *c)
{
	struct mca_config *cfg = &mca_cfg;

	 /*
	  * All newer Centaur CPUs support MCE broadcasting. Enable
	  * synchronization with a one second timeout.
	  */
	if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) ||
	     c->x86 > 6) {
		if (cfg->monarch_timeout < 0)
			cfg->monarch_timeout = USEC_PER_SEC;
	}
}

static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		mce_adjust_timer = cmci_intel_adjust_timer;
		break;

	case X86_VENDOR_AMD: {
		mce_amd_feature_init(c);
		break;
		}
	case X86_VENDOR_CENTAUR:
		mce_centaur_feature_init(c);
		break;

	default:
		break;
	}
}

static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_clear(c);
		break;
	default:
		break;
	}
}

static void mce_start_timer(struct timer_list *t)
{
	unsigned long iv = check_interval * HZ;

	if (mca_cfg.ignore_ce || !iv)
		return;

	this_cpu_write(mce_next_interval, iv);
	__start_timer(t, iv);
}

static void __mcheck_cpu_setup_timer(void)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);

	timer_setup(t, mce_timer_fn, TIMER_PINNED);
}

static void __mcheck_cpu_init_timer(void)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);

	timer_setup(t, mce_timer_fn, TIMER_PINNED);
	mce_start_timer(t);
}

/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
	pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
	       smp_processor_id());
}

/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
						unexpected_machine_check;

dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
{
	machine_check_vector(regs, error_code);
}

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void mcheck_cpu_init(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return;

	if (__mcheck_cpu_ancient_init(c))
		return;

	if (!mce_available(c))
		return;

	if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
		mca_cfg.disabled = 1;
		return;
	}

	if (mce_gen_pool_init()) {
		mca_cfg.disabled = 1;
		pr_emerg("Couldn't allocate MCE records pool!\n");
		return;
	}

	machine_check_vector = do_machine_check;

	__mcheck_cpu_init_early(c);
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(c);
	__mcheck_cpu_init_clear_banks();
	__mcheck_cpu_setup_timer();
}

/*
 * Called for each booted CPU to clear some machine checks opt-ins
 */
void mcheck_cpu_clear(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return;

	if (!mce_available(c))
		return;

	/*
	 * Possibly to clear general settings generic to x86
	 * __mcheck_cpu_clear_generic(c);
	 */
	__mcheck_cpu_clear_vendor(c);

}

static void __mce_disable_bank(void *arg)
{
	int bank = *((int *)arg);
	__clear_bit(bank, this_cpu_ptr(mce_poll_banks));
	cmci_disable_bank(bank);
}

void mce_disable_bank(int bank)
{
	if (bank >= mca_cfg.banks) {
		pr_warn(FW_BUG
			"Ignoring request to disable invalid MCA bank %d.\n",
			bank);
		return;
	}
	set_bit(bank, mce_banks_ce_disabled);
	on_each_cpu(__mce_disable_bank, &bank, 1);
}

/*
 * mce=off Disables machine check
 * mce=no_cmci Disables CMCI
 * mce=no_lmce Disables LMCE
 * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
 *	monarchtimeout is how long to wait for other CPUs on machine
 *	check, or 0 to not wait
 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD Fam10h
 *	and older.
 * mce=nobootlog Don't log MCEs from before booting.
 * mce=bios_cmci_threshold Don't program the CMCI threshold
 * mce=recovery force enable memcpy_mcsafe()
 */
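/*
 * Example (hypothetical values): booting with "mce=2,500" takes the
 * numeric branch below and sets tolerant=2 and monarch_timeout=500.
 */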
static int __init mcheck_enable(char *str)
{
	struct mca_config *cfg = &mca_cfg;

	if (*str == 0) {
		enable_p5_mce();
		return 1;
	}
	if (*str == '=')
		str++;
	if (!strcmp(str, "off"))
		cfg->disabled = 1;
	else if (!strcmp(str, "no_cmci"))
		cfg->cmci_disabled = true;
	else if (!strcmp(str, "no_lmce"))
		cfg->lmce_disabled = 1;
	else if (!strcmp(str, "dont_log_ce"))
		cfg->dont_log_ce = true;
	else if (!strcmp(str, "ignore_ce"))
		cfg->ignore_ce = true;
	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		cfg->bootlog = (str[0] == 'b');
	else if (!strcmp(str, "bios_cmci_threshold"))
		cfg->bios_cmci_threshold = 1;
	else if (!strcmp(str, "recovery"))
		cfg->recovery = 1;
	else if (isdigit(str[0])) {
		if (get_option(&str, &cfg->tolerant) == 2)
			get_option(&str, &(cfg->monarch_timeout));
	} else {
		pr_info("mce argument %s ignored. Please use /sys\n", str);
		return 0;
	}
	return 1;
}
__setup("mce", mcheck_enable);

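/* One-time boot setup: thermal init, decode chain registration and deferred-work plumbing. */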
int __init mcheck_init(void)
{
	mcheck_intel_therm_init();
	mce_register_decode_chain(&first_nb);
	mce_register_decode_chain(&mce_srao_nb);
	mce_register_decode_chain(&mce_default_nb);
	mcheck_vendor_init_severity();

	INIT_WORK(&mce_work, mce_gen_pool_process);
	init_irq_work(&mce_irq_work, mce_irq_work_cb);

	return 0;
}

/*
 * mce_syscore: PM support
 */

/*
 * Disable machine checks on suspend and shutdown. We can't really handle
 * them later.
 */
static void mce_disable_error_reporting(void)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(msr_ops.ctl(i), 0);
	}
}

static void vendor_disable_error_reporting(void)
{
	/*
	 * Don't clear on Intel or AMD CPUs. Some of these MSRs are socket-wide.
	 * Disabling them for just a single offlined CPU is bad, since it will
	 * inhibit reporting for all shared resources on the socket like the
	 * last level cache (LLC), the integrated memory controller (iMC), etc.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return;

	mce_disable_error_reporting();
}

static int mce_syscore_suspend(void)
{
	vendor_disable_error_reporting();
	return 0;
}

static void mce_syscore_shutdown(void)
{
	vendor_disable_error_reporting();
}

/*
 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
 * Only one CPU is active at this time, the others get re-added later using
 * CPU hotplug:
 */
static void mce_syscore_resume(void)
{
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
	__mcheck_cpu_init_clear_banks();
}

static struct syscore_ops mce_syscore_ops = {
	.suspend	= mce_syscore_suspend,
	.shutdown	= mce_syscore_shutdown,
	.resume		= mce_syscore_resume,
};

/*
 * mce_device: Sysfs support
 */

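/* Runs on each CPU via on_each_cpu() to re-apply MCA setup after a sysfs change. */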
static void mce_cpu_restart(void *data)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_clear_banks();
	__mcheck_cpu_init_timer();
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	mce_timer_delete_all();
	on_each_cpu(mce_cpu_restart, NULL, 1);
}

/* Toggle features for corrected errors */
static void mce_disable_cmci(void *data)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	cmci_clear();
}

static void mce_enable_ce(void *all)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	cmci_reenable();
	cmci_recheck();
	if (all)
		__mcheck_cpu_init_timer();
}

static struct bus_type mce_subsys = {
	.name		= "machinecheck",
	.dev_name	= "machinecheck",
};

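/* Per-CPU sysfs device, surfaced as /sys/devices/system/machinecheck/machinecheckN. */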
DEFINE_PER_CPU(struct device *, mce_device);

static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
{
	return container_of(attr, struct mce_bank, attr);
}

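/* The bankN sysfs files expose each bank's 64-bit control (MCi_CTL) mask. */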
static ssize_t show_bank(struct device *s, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
}

static ssize_t set_bank(struct device *s, struct device_attribute *attr,
			const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	attr_to_bank(attr)->ctl = new;
	mce_restart();

	return size;
}

static ssize_t set_ignore_ce(struct device *s,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	mutex_lock(&mce_sysfs_mutex);
	if (mca_cfg.ignore_ce ^ !!new) {
		if (new) {
			/* disable ce features */
			mce_timer_delete_all();
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mca_cfg.ignore_ce = true;
		} else {
			/* enable ce features */
			mca_cfg.ignore_ce = false;
			on_each_cpu(mce_enable_ce, (void *)1, 1);
		}
	}
	mutex_unlock(&mce_sysfs_mutex);

	return size;
}

static ssize_t set_cmci_disabled(struct device *s,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	mutex_lock(&mce_sysfs_mutex);
	if (mca_cfg.cmci_disabled ^ !!new) {
		if (new) {
			/* disable cmci */
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mca_cfg.cmci_disabled = true;
		} else {
			/* enable cmci */
			mca_cfg.cmci_disabled = false;
			on_each_cpu(mce_enable_ce, NULL, 1);
		}
	}
	mutex_unlock(&mce_sysfs_mutex);

	return size;
}

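/* check_interval writes: clamp to at least 1s and restart timers only on a real change. */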
static ssize_t store_int_with_restart(struct device *s,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	unsigned long old_check_interval = check_interval;
	ssize_t ret = device_store_ulong(s, attr, buf, size);

	if (check_interval == old_check_interval)
		return ret;

	if (check_interval < 1)
		check_interval = 1;

	mutex_lock(&mce_sysfs_mutex);
	mce_restart();
	mutex_unlock(&mce_sysfs_mutex);

	return ret;
}

static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);

static struct dev_ext_attribute dev_attr_check_interval = {
	__ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
	&check_interval
};

static struct dev_ext_attribute dev_attr_ignore_ce = {
	__ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
	&mca_cfg.ignore_ce
};

static struct dev_ext_attribute dev_attr_cmci_disabled = {
	__ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
	&mca_cfg.cmci_disabled
};

static struct device_attribute *mce_device_attrs[] = {
	&dev_attr_tolerant.attr,
	&dev_attr_check_interval.attr,
#ifdef CONFIG_X86_MCELOG_LEGACY
	&dev_attr_trigger,
#endif
	&dev_attr_monarch_timeout.attr,
	&dev_attr_dont_log_ce.attr,
	&dev_attr_ignore_ce.attr,
	&dev_attr_cmci_disabled.attr,
	NULL
};

static cpumask_var_t mce_device_initialized;

static void mce_device_release(struct device *dev)
{
	kfree(dev);
}

/* Per cpu device init. All of the cpus still share the same ctrl bank: */
static int mce_device_create(unsigned int cpu)
{
	struct device *dev;
	int err;
	int i, j;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	dev = per_cpu(mce_device, cpu);
	if (dev)
		return 0;

	dev = kzalloc(sizeof *dev, GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->id  = cpu;
	dev->bus = &mce_subsys;
	dev->release = &mce_device_release;

	err = device_register(dev);
	if (err) {
		put_device(dev);
		return err;
	}

	for (i = 0; mce_device_attrs[i]; i++) {
		err = device_create_file(dev, mce_device_attrs[i]);
		if (err)
			goto error;
	}
	for (j = 0; j < mca_cfg.banks; j++) {
		err = device_create_file(dev, &mce_banks[j].attr);
		if (err)
			goto error2;
	}
	cpumask_set_cpu(cpu, mce_device_initialized);
	per_cpu(mce_device, cpu) = dev;

	return 0;
error2:
	while (--j >= 0)
		device_remove_file(dev, &mce_banks[j].attr);
error:
	while (--i >= 0)
		device_remove_file(dev, mce_device_attrs[i]);

	device_unregister(dev);

	return err;
}

static void mce_device_remove(unsigned int cpu)
{
	struct device *dev = per_cpu(mce_device, cpu);
	int i;

	if (!cpumask_test_cpu(cpu, mce_device_initialized))
		return;

	for (i = 0; mce_device_attrs[i]; i++)
		device_remove_file(dev, mce_device_attrs[i]);

	for (i = 0; i < mca_cfg.banks; i++)
		device_remove_file(dev, &mce_banks[i].attr);

	device_unregister(dev);
	cpumask_clear_cpu(cpu, mce_device_initialized);
	per_cpu(mce_device, cpu) = NULL;
}

/* Make sure there are no machine checks on offlined CPUs. */
static void mce_disable_cpu(void)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;

	if (!cpuhp_tasks_frozen)
		cmci_clear();

	vendor_disable_error_reporting();
}

static void mce_reenable_cpu(void)
{
	int i;

	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;

	if (!cpuhp_tasks_frozen)
		cmci_reenable();
	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(msr_ops.ctl(i), b->ctl);
	}
}

static int mce_cpu_dead(unsigned int cpu)
{
	mce_intel_hcpu_update(cpu);

	/* intentionally ignoring frozen here */
	if (!cpuhp_tasks_frozen)
		cmci_rediscover();
	return 0;
}

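/* CPU hotplug "online" callback: create sysfs/threshold devices, then re-arm MCA. */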
static int mce_cpu_online(unsigned int cpu)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	int ret;

	mce_device_create(cpu);

	ret = mce_threshold_create_device(cpu);
	if (ret) {
		mce_device_remove(cpu);
		return ret;
	}
	mce_reenable_cpu();
	mce_start_timer(t);
	return 0;
}

static int mce_cpu_pre_down(unsigned int cpu)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);

	mce_disable_cpu();
	del_timer_sync(t);
	mce_threshold_remove_device(cpu);
	mce_device_remove(cpu);
	return 0;
}

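/* Build one "bankN" control file per MCA bank; writes land in set_bank(). */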
static __init void mce_init_banks(void)
{
	int i;

	for (i = 0; i < mca_cfg.banks; i++) {
		struct mce_bank *b = &mce_banks[i];
		struct device_attribute *a = &b->attr;

		sysfs_attr_init(&a->attr);
		a->attr.name	= b->attrname;
		snprintf(b->attrname, ATTR_LEN, "bank%d", i);

		a->attr.mode	= 0644;
		a->show		= show_bank;
		a->store	= set_bank;
	}
}

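/* Register the sysfs subsystem, CPU hotplug callbacks and syscore ops. */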
static __init int mcheck_init_device(void)
{
	int err;

	/*
	 * Check if we have a spare virtual bit. This will only become
	 * a problem if/when we move beyond 5-level page tables.
	 */
	MAYBE_BUILD_BUG_ON(__VIRTUAL_MASK_SHIFT >= 63);

	if (!mce_available(&boot_cpu_data)) {
		err = -EIO;
		goto err_out;
	}

	if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
		err = -ENOMEM;
		goto err_out;
	}

	mce_init_banks();

	err = subsys_system_register(&mce_subsys, NULL);
	if (err)
		goto err_out_mem;

	err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL,
				mce_cpu_dead);
	if (err)
		goto err_out_mem;

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
				mce_cpu_online, mce_cpu_pre_down);
	if (err < 0)
		goto err_out_online;

	register_syscore_ops(&mce_syscore_ops);

	return 0;

err_out_online:
	cpuhp_remove_state(CPUHP_X86_MCE_DEAD);

err_out_mem:
	free_cpumask_var(mce_device_initialized);

err_out:
	pr_err("Unable to init MCE device (rc: %d)\n", err);

	return err;
}
device_initcall_sync(mcheck_init_device);

/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
	mca_cfg.disabled = 1;
	return 1;
}
__setup("nomce", mcheck_disable);

#ifdef CONFIG_DEBUG_FS
struct dentry *mce_get_debugfs_dir(void)
{
	static struct dentry *dmce;

	if (!dmce)
		dmce = debugfs_create_dir("mce", NULL);

	return dmce;
}

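/* Reset the monarch rendezvous state so a fake panic can be re-run. */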
static void mce_reset(void)
{
	cpu_missing = 0;
	atomic_set(&mce_fake_panicked, 0);
	atomic_set(&mce_executing, 0);
	atomic_set(&mce_callin, 0);
	atomic_set(&global_nwo, 0);
}

static int fake_panic_get(void *data, u64 *val)
{
	*val = fake_panic;
	return 0;
}

static int fake_panic_set(void *data, u64 val)
{
	mce_reset();
	fake_panic = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
			fake_panic_set, "%llu\n");

static int __init mcheck_debugfs_init(void)
{
	struct dentry *dmce, *ffake_panic;

	dmce = mce_get_debugfs_dir();
	if (!dmce)
		return -ENOMEM;
	ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
					  &fake_panic_fops);
	if (!ffake_panic)
		return -ENOMEM;

	return 0;
}
#else
static int __init mcheck_debugfs_init(void) { return -EINVAL; }
#endif

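/* mcsafe_key gates the memcpy_mcsafe() recovery path; enabled when mca_cfg.recovery is set (e.g. via "mce=recovery"). */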
DEFINE_STATIC_KEY_FALSE(mcsafe_key);
EXPORT_SYMBOL_GPL(mcsafe_key);

static int __init mcheck_late_init(void)
{
	if (mca_cfg.recovery)
		static_branch_inc(&mcsafe_key);

	mcheck_debugfs_init();
	cec_init();

	/*
	 * Flush out everything that has been logged during early boot, now that
	 * everything has been initialized (workqueues, decoders, ...).
	 */
	mce_schedule_work();

	return 0;
}
late_initcall(mcheck_late_init);