/*
 *  (c) 2005-2016 Advanced Micro Devices, Inc.
 *  Your use of this code is subject to the terms and conditions of the
 *  GNU general public license version 2. See "COPYING" or
 *  http://www.gnu.org/licenses/gpl.html
 *
 *  Written by Jacob Shin - AMD, Inc.
 *  Maintained by: Borislav Petkov <bp@alien8.de>
 *
 *  All MC4_MISCi registers are shared between cores on a node.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/string.h>

#include <asm/amd_nb.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>

#include "mce-internal.h"

#define NR_BLOCKS         5
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400

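/*
 * Layout sketch of an MCi_MISCj thresholding register's high word, as
 * selected by the MASK_* values above (bit numbers relative to the high
 * 32 bits): valid (31), counter present (30), locked (29), LVT offset
 * (23:20), counter enable (19), interrupt type (18:17), overflow (16),
 * error count (11:0).
 */
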
/* Deferred error settings */
#define MSR_CU_DEF_ERR		0xC0000410
#define MASK_DEF_LVTOFF		0x000000F0
#define MASK_DEF_INT_TYPE	0x00000006
#define DEF_LVT_OFF		0x2
#define DEF_INT_TYPE_APIC	0x2

/* Scalable MCA: */

/* Threshold LVT offset is at MSR0xC0000410[15:12] */
#define SMCA_THR_LVT_OFF	0xF000

static bool thresholding_en;

static const char * const th_names[] = {
	"load_store",
	"insn_fetch",
	"combined_unit",
	"decode_unit",
	"northbridge",
	"execution_unit",
};

static const char * const smca_umc_block_names[] = {
	"dram_ecc",
	"misc_umc"
};

struct smca_bank_name {
	const char *name;	/* Short name for sysfs */
	const char *long_name;	/* Long name for pretty-printing */
};

static struct smca_bank_name smca_names[] = {
	[SMCA_LS]	= { "load_store",	"Load Store Unit" },
	[SMCA_IF]	= { "insn_fetch",	"Instruction Fetch Unit" },
	[SMCA_L2_CACHE]	= { "l2_cache",		"L2 Cache" },
	[SMCA_DE]	= { "decode_unit",	"Decode Unit" },
	[SMCA_EX]	= { "execution_unit",	"Execution Unit" },
	[SMCA_FP]	= { "floating_point",	"Floating Point Unit" },
	[SMCA_L3_CACHE]	= { "l3_cache",		"L3 Cache" },
	[SMCA_CS]	= { "coherent_slave",	"Coherent Slave" },
	[SMCA_PIE]	= { "pie",		"Power, Interrupts, etc." },
	[SMCA_UMC]	= { "umc",		"Unified Memory Controller" },
	[SMCA_PB]	= { "param_block",	"Parameter Block" },
	[SMCA_PSP]	= { "psp",		"Platform Security Processor" },
	[SMCA_SMU]	= { "smu",		"System Management Unit" },
};

const char *smca_get_name(enum smca_bank_types t)
{
	if (t >= N_SMCA_BANK_TYPES)
		return NULL;

	return smca_names[t].name;
}

const char *smca_get_long_name(enum smca_bank_types t)
{
	if (t >= N_SMCA_BANK_TYPES)
		return NULL;

	return smca_names[t].long_name;
}
EXPORT_SYMBOL_GPL(smca_get_long_name);

static enum smca_bank_types smca_get_bank_type(struct mce *m)
{
	struct smca_bank *b;

	/* Bound by the number of MCA banks, not the number of bank types. */
	if (m->bank >= MAX_NR_BANKS)
		return N_SMCA_BANK_TYPES;

	b = &smca_banks[m->bank];
	if (!b->hwid)
		return N_SMCA_BANK_TYPES;

	return b->hwid->bank_type;
}

static struct smca_hwid smca_hwid_mcatypes[] = {
	/* { bank_type, hwid_mcatype, xec_bitmap } */

	/* ZN Core (HWID=0xB0) MCA types */
	{ SMCA_LS,	 HWID_MCATYPE(0xB0, 0x0), 0x1FFFEF },
	{ SMCA_IF,	 HWID_MCATYPE(0xB0, 0x1), 0x3FFF },
	{ SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF },
	{ SMCA_DE,	 HWID_MCATYPE(0xB0, 0x3), 0x1FF },
	/* HWID 0xB0 MCATYPE 0x4 is Reserved */
	{ SMCA_EX,	 HWID_MCATYPE(0xB0, 0x5), 0x7FF },
	{ SMCA_FP,	 HWID_MCATYPE(0xB0, 0x6), 0x7F },
	{ SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7), 0xFF },

	/* Data Fabric MCA types */
	{ SMCA_CS,	 HWID_MCATYPE(0x2E, 0x0), 0x1FF },
	{ SMCA_PIE,	 HWID_MCATYPE(0x2E, 0x1), 0xF },

	/* Unified Memory Controller MCA type */
	{ SMCA_UMC,	 HWID_MCATYPE(0x96, 0x0), 0x3F },

	/* Parameter Block MCA type */
	{ SMCA_PB,	 HWID_MCATYPE(0x05, 0x0), 0x1 },

	/* Platform Security Processor MCA type */
	{ SMCA_PSP,	 HWID_MCATYPE(0xFF, 0x0), 0x1 },

	/* System Management Unit MCA type */
	{ SMCA_SMU,	 HWID_MCATYPE(0x01, 0x0), 0x1 },
};
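
/*
 * Matching sketch: smca_configure() below packs MCA_IPID[HardwareID] and
 * MCA_IPID[McaType] with HWID_MCATYPE() and compares the result against
 * this table. For example, a bank whose MCA_IPID reports HardwareID 0xB0
 * and McaType 0x1 resolves to SMCA_IF ("insn_fetch").
 */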

struct smca_bank smca_banks[MAX_NR_BANKS];
EXPORT_SYMBOL_GPL(smca_banks);

/*
 * In SMCA enabled processors, we can have multiple banks for a given IP type.
 * So to define a unique name for each bank, we use a temp c-string to append
 * the MCA_IPID[InstanceId] to type's name in get_name().
 *
 * InstanceId is 32 bits, which is at most 8 hex characters. Make sure
 * MAX_MCATYPE_NAME_LEN is greater than 8 plus 1 (for the underscore) plus
 * the length of the longest type name.
 */
#define MAX_MCATYPE_NAME_LEN	30
static char buf_mcatype[MAX_MCATYPE_NAME_LEN];

static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
static DEFINE_PER_CPU(unsigned int, bank_map);	/* see which banks are on */

static void amd_threshold_interrupt(void);
static void amd_deferred_error_interrupt(void);

static void default_deferred_error_interrupt(void)
{
	pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR);
}
void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;

static void smca_configure(unsigned int bank, unsigned int cpu)
{
	unsigned int i, hwid_mcatype;
	struct smca_hwid *s_hwid;
	u32 high, low;
	u32 smca_config = MSR_AMD64_SMCA_MCx_CONFIG(bank);

	/* Set appropriate bits in MCA_CONFIG */
	if (!rdmsr_safe(smca_config, &low, &high)) {
		/*
		 * OS is required to set the MCAX bit to acknowledge that it is
		 * now using the new MSR ranges and new registers under each
		 * bank. It also means that the OS will configure deferred
		 * errors in the new MCx_CONFIG register. If the bit is not set,
		 * uncorrectable errors will cause a system panic.
		 *
		 * MCA_CONFIG[MCAX] is bit 32 (bit 0 in the high portion of the MSR).
		 */
		high |= BIT(0);

		/*
		 * SMCA sets the Deferred Error Interrupt type per bank.
		 *
		 * MCA_CONFIG[DeferredIntTypeSupported] is bit 5, and tells us
		 * if the DeferredIntType bit field is available.
		 *
		 * MCA_CONFIG[DeferredIntType] is bits [38:37] ([6:5] in the
		 * high portion of the MSR). OS should set this to 0x1 to enable
		 * APIC based interrupt. First, check that no interrupt has been
		 * set.
		 */
		if ((low & BIT(5)) && !((high >> 5) & 0x3))
			high |= BIT(5);

		wrmsr(smca_config, low, high);
	}

	/* Return early if this bank was already initialized. */
	if (smca_banks[bank].hwid)
		return;

	if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
		pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
		return;
	}

	hwid_mcatype = HWID_MCATYPE(high & MCI_IPID_HWID,
				    (high & MCI_IPID_MCATYPE) >> 16);

	for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) {
		s_hwid = &smca_hwid_mcatypes[i];
		if (hwid_mcatype == s_hwid->hwid_mcatype) {
			smca_banks[bank].hwid = s_hwid;
			smca_banks[bank].id = low;
			smca_banks[bank].sysfs_id = s_hwid->count++;
			break;
		}
	}
}

struct thresh_restart {
	struct threshold_block	*b;
	int			reset;
	int			set_lvt_off;
	int			lvt_off;
	u16			old_limit;
};

static inline bool is_shared_bank(int bank)
{
	/*
	 * Scalable MCA provides for only one core to have access to the MSRs of
	 * a shared bank.
	 */
	if (mce_flags.smca)
		return false;

	/* Bank 4 is for northbridge reporting and is thus shared */
	return (bank == 4);
}

static const char *bank4_names(const struct threshold_block *b)
{
	switch (b->address) {
	/* MSR4_MISC0 */
	case 0x00000413:
		return "dram";

	case 0xc0000408:
		return "ht_links";

	case 0xc0000409:
		return "l3_cache";

	default:
		WARN(1, "Funny MSR: 0x%08x\n", b->address);
		return "";
	}
}

static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
	/*
	 * bank 4 supports APIC LVT interrupts implicitly since forever.
	 */
	if (bank == 4)
		return true;

	/*
	 * IntP: interrupt present; if this bit is set, the thresholding
	 * bank can generate APIC LVT interrupts
	 */
	return msr_high_bits & BIT(28);
}

static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
	int msr = (hi & MASK_LVTOFF_HI) >> 20;

	if (apic < 0) {
		pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
		       b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	if (apic != msr) {
		/*
		 * On SMCA CPUs, LVT offset is programmed at a different MSR, and
		 * the BIOS provides the value. The original field where LVT offset
		 * was set is reserved. Return early here:
		 */
		if (mce_flags.smca)
			return 0;

		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	return 1;
}

/* Reprogram MCx_MISC MSR behind this threshold bank. */
static void threshold_restart_bank(void *_tr)
{
	struct thresh_restart *tr = _tr;
	u32 hi, lo;

	rdmsr(tr->b->address, lo, hi);

	if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
		tr->reset = 1;	/* limit cannot be lower than err count */

	if (tr->reset) {		/* reset err count and overflow bit */
		hi =
		    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
		    (THRESHOLD_MAX - tr->b->threshold_limit);
	} else if (tr->old_limit) {	/* change limit w/o reset */
		int new_count = (hi & THRESHOLD_MAX) +
		    (tr->old_limit - tr->b->threshold_limit);

		hi = (hi & ~MASK_ERR_COUNT_HI) |
		    (new_count & THRESHOLD_MAX);
	}

	/* clear IntType */
	hi &= ~MASK_INT_TYPE_HI;

	if (!tr->b->interrupt_capable)
		goto done;

	if (tr->set_lvt_off) {
		if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
			/* set new lvt offset */
			hi &= ~MASK_LVTOFF_HI;
			hi |= tr->lvt_off << 20;
		}
	}

	if (tr->b->interrupt_enable)
		hi |= INT_TYPE_APIC;

 done:

	hi |= MASK_COUNT_EN_HI;
	wrmsr(tr->b->address, lo, hi);
}

static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
	struct thresh_restart tr = {
		.b			= b,
		.set_lvt_off		= 1,
		.lvt_off		= offset,
	};

	b->threshold_limit		= THRESHOLD_MAX;
	threshold_restart_bank(&tr);
}

static int setup_APIC_mce_threshold(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

static int setup_APIC_deferred_error(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0;
	int def_offset = -1, def_new;

	if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
		return;

	def_new = (low & MASK_DEF_LVTOFF) >> 4;
	if (!(low & MASK_DEF_LVTOFF)) {
		pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n");
		def_new = DEF_LVT_OFF;
		low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4);
	}

	def_offset = setup_APIC_deferred_error(def_offset, def_new);
	if ((def_offset == def_new) &&
	    (deferred_error_int_vector != amd_deferred_error_interrupt))
		deferred_error_int_vector = amd_deferred_error_interrupt;

	if (!mce_flags.smca)
		low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;

	wrmsr(MSR_CU_DEF_ERR, low, high);
}

static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high,
			     unsigned int bank, unsigned int block)
{
	u32 addr = 0, offset = 0;

	if (mce_flags.smca) {
		if (!block) {
			addr = MSR_AMD64_SMCA_MCx_MISC(bank);
		} else {
			/*
			 * For SMCA enabled processors, BLKPTR field of the
			 * first MISC register (MCx_MISC0) indicates presence of
			 * additional MISC register set (MISC1-4).
			 */
			u32 low, high;

			if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
				return addr;

			if (!(low & MCI_CONFIG_MCAX))
				return addr;

			if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
			    (low & MASK_BLKPTR_LO))
				addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
		}
		return addr;
	}

	/* Fall back to method we used for older processors: */
	switch (block) {
	case 0:
		addr = msr_ops.misc(bank);
		break;
	case 1:
		offset = ((low & MASK_BLKPTR_LO) >> 21);
		if (offset)
			addr = MCG_XBLK_ADDR + offset;
		break;
	default:
		addr = ++current_addr;
	}
	return addr;
}
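
/*
 * Example walk for legacy bank 4, matching the addresses listed in
 * bank4_names() above: block 0 is MC4_MISC0 (0x413); when its BLKPTR
 * field is set, block 1 lives in the 0xC0000400 extended block space
 * (e.g. 0xc0000408), and later blocks follow consecutively from there
 * (0xc0000409).
 */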

static int
prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
			int offset, u32 misc_high)
{
	unsigned int cpu = smp_processor_id();
	u32 smca_low, smca_high;
	struct threshold_block b;
	int new;

	if (!block)
		per_cpu(bank_map, cpu) |= (1 << bank);

	memset(&b, 0, sizeof(b));
	b.cpu			= cpu;
	b.bank			= bank;
	b.block			= block;
	b.address		= addr;
	b.interrupt_capable	= lvt_interrupt_supported(bank, misc_high);

	if (!b.interrupt_capable)
		goto done;

	b.interrupt_enable = 1;

	if (!mce_flags.smca) {
		new = (misc_high & MASK_LVTOFF_HI) >> 20;
		goto set_offset;
	}

	/* Gather LVT offset for thresholding: */
	if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
		goto out;

	new = (smca_low & SMCA_THR_LVT_OFF) >> 12;

set_offset:
	offset = setup_APIC_mce_threshold(offset, new);

	if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt))
		mce_threshold_vector = amd_threshold_interrupt;

done:
	mce_threshold_block_init(&b, offset);

out:
	return offset;
}

/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block, cpu = smp_processor_id();
	int offset = -1;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (mce_flags.smca)
			smca_configure(bank, cpu);

		for (block = 0; block < NR_BLOCKS; ++block) {
			address = get_block_address(cpu, address, low, high, bank, block);
			if (!address)
				break;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI))
				continue;

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			offset = prepare_threshold_block(bank, block, address, offset, high);
		}
	}

	if (mce_flags.succor)
		deferred_error_interrupt_enable(c);
}

int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
{
	u64 dram_base_addr, dram_limit_addr, dram_hole_base;
	/* We start from the normalized address */
	u64 ret_addr = norm_addr;

	u32 tmp;

	u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
	u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
	u8 intlv_addr_sel, intlv_addr_bit;
	u8 num_intlv_bits, hashed_bit;
	u8 lgcy_mmio_hole_en, base = 0;
	u8 cs_mask, cs_id = 0;
	bool hash_enabled = false;

	/* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
	if (amd_df_indirect_read(nid, 0, 0x1B4, umc, &tmp))
		goto out_err;

	/* Remove HiAddrOffset from normalized address, if enabled: */
	if (tmp & BIT(0)) {
		u64 hi_addr_offset = (tmp & GENMASK_ULL(31, 20)) << 8;

		if (norm_addr >= hi_addr_offset) {
			ret_addr -= hi_addr_offset;
			base = 1;
		}
	}

	/* Read D18F0x110 (DramBaseAddress). */
	if (amd_df_indirect_read(nid, 0, 0x110 + (8 * base), umc, &tmp))
		goto out_err;

	/* Check if address range is valid. */
	if (!(tmp & BIT(0))) {
		pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
			__func__, tmp);
		goto out_err;
	}

	lgcy_mmio_hole_en = tmp & BIT(1);
	intlv_num_chan	  = (tmp >> 4) & 0xF;
	intlv_addr_sel	  = (tmp >> 8) & 0x7;
	dram_base_addr	  = (tmp & GENMASK_ULL(31, 12)) << 16;

	/* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
	if (intlv_addr_sel > 3) {
		pr_err("%s: Invalid interleave address select %d.\n",
			__func__, intlv_addr_sel);
		goto out_err;
	}

	/* Read D18F0x114 (DramLimitAddress). */
	if (amd_df_indirect_read(nid, 0, 0x114 + (8 * base), umc, &tmp))
		goto out_err;

	intlv_num_sockets = (tmp >> 8) & 0x1;
	intlv_num_dies	  = (tmp >> 10) & 0x3;
	dram_limit_addr	  = ((tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);

	intlv_addr_bit = intlv_addr_sel + 8;

	/* Re-use intlv_num_chan by setting it equal to log2(#channels) */
	switch (intlv_num_chan) {
	case 0:	intlv_num_chan = 0; break;
	case 1: intlv_num_chan = 1; break;
	case 3: intlv_num_chan = 2; break;
	case 5:	intlv_num_chan = 3; break;
	case 7:	intlv_num_chan = 4; break;

	case 8: intlv_num_chan = 1;
		hash_enabled = true;
		break;
	default:
		pr_err("%s: Invalid number of interleaved channels %d.\n",
			__func__, intlv_num_chan);
		goto out_err;
	}

	num_intlv_bits = intlv_num_chan;

	if (intlv_num_dies > 2) {
		pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
			__func__, intlv_num_dies);
		goto out_err;
	}

	num_intlv_bits += intlv_num_dies;

	/* Add a bit if sockets are interleaved. */
	num_intlv_bits += intlv_num_sockets;

	/* Assert num_intlv_bits <= 4 */
	if (num_intlv_bits > 4) {
		pr_err("%s: Invalid interleave bits %d.\n",
			__func__, num_intlv_bits);
		goto out_err;
	}

	if (num_intlv_bits > 0) {
		u64 temp_addr_x, temp_addr_i, temp_addr_y;
		u8 die_id_bit, sock_id_bit, cs_fabric_id;

		/*
		 * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
		 * This is the fabric id for this coherent slave. Use
		 * umc/channel# as instance id of the coherent slave
		 * for FICAA.
		 */
		if (amd_df_indirect_read(nid, 0, 0x50, umc, &tmp))
			goto out_err;

		cs_fabric_id = (tmp >> 8) & 0xFF;
		die_id_bit   = 0;

		/* If interleaved over more than 1 channel: */
		if (intlv_num_chan) {
			die_id_bit = intlv_num_chan;
			cs_mask	   = (1 << die_id_bit) - 1;
			cs_id	   = cs_fabric_id & cs_mask;
		}

		sock_id_bit = die_id_bit;

		/* Read D18F1x208 (SystemFabricIdMask). */
		if (intlv_num_dies || intlv_num_sockets)
			if (amd_df_indirect_read(nid, 1, 0x208, umc, &tmp))
				goto out_err;

		/* If interleaved over more than 1 die. */
		if (intlv_num_dies) {
			sock_id_bit  = die_id_bit + intlv_num_dies;
			die_id_shift = (tmp >> 24) & 0xF;
			die_id_mask  = (tmp >> 8) & 0xFF;

			cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
		}

		/* If interleaved over more than 1 socket. */
		if (intlv_num_sockets) {
			socket_id_shift	= (tmp >> 28) & 0xF;
			socket_id_mask	= (tmp >> 16) & 0xFF;

			cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
		}

		/*
		 * The pre-interleaved address consists of XXXXXXIIIYYYYY
		 * where III is the ID for this CS, and XXXXXXYYYYY are the
		 * address bits from the post-interleaved address.
		 * "num_intlv_bits" has been calculated to tell us how many "I"
		 * bits there are. "intlv_addr_bit" tells us how many "Y" bits
		 * there are (where "I" starts).
		 */
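		/*
		 * Worked example with hypothetical values: if
		 * intlv_addr_bit == 8 and num_intlv_bits == 2, a cs_id of
		 * 0x3 is inserted at bits [9:8], bits [7:0] of the
		 * normalized address are kept, and everything from bit 8
		 * upwards is shifted left by two.
		 */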
		temp_addr_y = ret_addr & GENMASK_ULL(intlv_addr_bit-1, 0);
		temp_addr_i = (cs_id << intlv_addr_bit);
		temp_addr_x = (ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
		ret_addr    = temp_addr_x | temp_addr_i | temp_addr_y;
	}

	/* Add dram base address */
	ret_addr += dram_base_addr;

	/* If legacy MMIO hole enabled */
	if (lgcy_mmio_hole_en) {
		if (amd_df_indirect_read(nid, 0, 0x104, umc, &tmp))
			goto out_err;

		dram_hole_base = tmp & GENMASK(31, 24);
		if (ret_addr >= dram_hole_base)
			ret_addr += (BIT_ULL(32) - dram_hole_base);
	}

	if (hash_enabled) {
		/* Save some parentheses and grab ls-bit at the end. */
		hashed_bit =	(ret_addr >> 12) ^
				(ret_addr >> 18) ^
				(ret_addr >> 21) ^
				(ret_addr >> 30) ^
				cs_id;

		hashed_bit &= BIT(0);

		if (hashed_bit != ((ret_addr >> intlv_addr_bit) & BIT(0)))
			ret_addr ^= BIT(intlv_addr_bit);
	}

	/* Is the calculated system address above the DRAM limit address? */
	if (ret_addr > dram_limit_addr)
		goto out_err;

	*sys_addr = ret_addr;
	return 0;

out_err:
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(umc_normaddr_to_sysaddr);
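
/*
 * Usage sketch (hypothetical caller): an EDAC driver holding a normalized
 * address from a UMC bank could do:
 *
 *	u64 sys_addr;
 *
 *	if (!umc_normaddr_to_sysaddr(m->addr, nid, umc, &sys_addr))
 *		pr_info("system address: 0x%llx\n", sys_addr);
 *
 * with nid and umc derived from the reporting bank's MCA_IPID.
 */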

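/*
 * Note: on SMCA parts the memory-error signature is extended error code
 * 0x0 in a UMC bank (DRAM ECC, cf. smca_umc_block_names above); on
 * legacy parts it is extended error code 0x8 in northbridge bank 4.
 */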
bool amd_mce_is_memory_error(struct mce *m)
{
	/* ErrCodeExt[20:16] */
	u8 xec = (m->status >> 16) & 0x1f;

	if (mce_flags.smca)
		return smca_get_bank_type(m) == SMCA_UMC && xec == 0x0;

	return m->bank == 4 && xec == 0x8;
}

static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
{
	struct mce m;

	mce_setup(&m);

	m.status = status;
	m.misc   = misc;
	m.bank   = bank;
	m.tsc	 = rdtsc();

	if (m.status & MCI_STATUS_ADDRV) {
		m.addr = addr;

		/*
		 * Extract [55:<lsb>] where lsb is the least significant
		 * *valid* bit of the address bits.
		 */
		if (mce_flags.smca) {
			u8 lsb = (m.addr >> 56) & 0x3f;

			m.addr &= GENMASK_ULL(55, lsb);
		}
	}

	if (mce_flags.smca) {
		rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m.ipid);

		if (m.status & MCI_STATUS_SYNDV)
			rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m.synd);
	}

	mce_log(&m);
}

asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
{
	entering_irq();
	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
	inc_irq_stat(irq_deferred_error_count);
	deferred_error_int_vector();
	trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
	exiting_ack_irq();
}

/*
 * Returns true if the logged error is deferred, false otherwise.
 */
static inline bool
_log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc)
{
	u64 status, addr = 0;

	rdmsrl(msr_stat, status);
	if (!(status & MCI_STATUS_VAL))
		return false;

	if (status & MCI_STATUS_ADDRV)
		rdmsrl(msr_addr, addr);

	__log_error(bank, status, addr, misc);

	wrmsrl(msr_stat, 0);

	return status & MCI_STATUS_DEFERRED;
}

/*
 * We have three scenarios for checking for Deferred errors:
 *
 * 1) Non-SMCA systems check MCA_STATUS and log the error if found.
 * 2) SMCA systems check MCA_STATUS. If an error is found then log it and also
 *    clear MCA_DESTAT.
 * 3) SMCA systems check MCA_DESTAT if no error was found in MCA_STATUS, and
 *    log it.
 */
static void log_error_deferred(unsigned int bank)
{
	bool defrd;

	defrd = _log_error_bank(bank, msr_ops.status(bank),
					msr_ops.addr(bank), 0);

	if (!mce_flags.smca)
		return;

	/* Clear MCA_DESTAT if we logged the deferred error from MCA_STATUS. */
	if (defrd) {
		wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0);
		return;
	}

	/*
	 * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check
	 * for a valid error.
	 */
	_log_error_bank(bank, MSR_AMD64_SMCA_MCx_DESTAT(bank),
			      MSR_AMD64_SMCA_MCx_DEADDR(bank), 0);
}

/* APIC interrupt handler for deferred errors */
static void amd_deferred_error_interrupt(void)
{
	unsigned int bank;

	for (bank = 0; bank < mca_cfg.banks; ++bank)
		log_error_deferred(bank);
}

static void log_error_thresholding(unsigned int bank, u64 misc)
{
	_log_error_bank(bank, msr_ops.status(bank), msr_ops.addr(bank), misc);
}

static void log_and_reset_block(struct threshold_block *block)
{
	struct thresh_restart tr;
	u32 low = 0, high = 0;

	if (!block)
		return;

	if (rdmsr_safe(block->address, &low, &high))
		return;

	if (!(high & MASK_OVERFLOW_HI))
		return;

	/* Log the MCE which caused the threshold event. */
	log_error_thresholding(block->bank, ((u64)high << 32) | low);

	/* Reset threshold block after logging error. */
	memset(&tr, 0, sizeof(tr));
	tr.b = block;
	threshold_restart_bank(&tr);
}

/*
 * Threshold interrupt handler will service THRESHOLD_APIC_VECTOR. The interrupt
 * goes off when error_count reaches threshold_limit.
 */
static void amd_threshold_interrupt(void)
{
	struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL;
	unsigned int bank, cpu = smp_processor_id();

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;

		first_block = per_cpu(threshold_banks, cpu)[bank]->blocks;
		if (!first_block)
			continue;

		/*
		 * The first block is also the head of the list. Check it first
		 * before iterating over the rest.
		 */
		log_and_reset_block(first_block);
		list_for_each_entry_safe(block, tmp, &first_block->miscj, miscj)
			log_and_reset_block(block);
	}
}

/*
 * Sysfs Interface
 */

struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long) b->name);		\
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (!b->interrupt_capable)
		return -EINVAL;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	b->interrupt_enable = !!new;

	memset(&tr, 0, sizeof(tr));
	tr.b		= b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	memset(&tr, 0, sizeof(tr));
	tr.old_limit = b->threshold_limit;
	b->threshold_limit = new;
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

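/*
 * Sketch of the arithmetic (hypothetical numbers): with threshold_limit
 * set to 10, threshold_restart_bank() primes the MSR count field to
 * THRESHOLD_MAX - 10. After three errors the field reads
 * THRESHOLD_MAX - 10 + 3, so the value shown below is 3, i.e. the number
 * of errors seen since the last reset.
 */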
static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
	u32 lo, hi;

	rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);

	return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
				     (THRESHOLD_MAX - b->threshold_limit)));
}

static struct threshold_attr error_count = {
	.attr = {.name = __stringify(error_count), .mode = 0444 },
	.show = show_error_count,
};

#define RW_ATTR(val)							\
static struct threshold_attr val = {					\
	.attr	= {.name = __stringify(val), .mode = 0644 },		\
	.show	= show_## val,						\
	.store	= store_## val,						\
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);

static struct attribute *default_attrs[] = {
	&threshold_limit.attr,
	&error_count.attr,
	NULL,	/* possibly interrupt_enable if supported, see below */
	NULL,
};

#define to_block(k)	container_of(k, struct threshold_block, kobj)
#define to_attr(a)	container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;

	return ret;
}

static const struct sysfs_ops threshold_ops = {
	.show			= show,
	.store			= store,
};

static struct kobj_type threshold_ktype = {
	.sysfs_ops		= &threshold_ops,
	.default_attrs		= default_attrs,
};

static const char *get_name(unsigned int bank, struct threshold_block *b)
{
	unsigned int bank_type;

	if (!mce_flags.smca) {
		if (b && bank == 4)
			return bank4_names(b);

		return th_names[bank];
	}

	if (!smca_banks[bank].hwid)
		return NULL;

	bank_type = smca_banks[bank].hwid->bank_type;

	if (b && bank_type == SMCA_UMC) {
		if (b->block < ARRAY_SIZE(smca_umc_block_names))
			return smca_umc_block_names[b->block];
		return NULL;
	}

	if (smca_banks[bank].hwid->count == 1)
		return smca_get_name(bank_type);

	snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN,
		 "%s_%x", smca_get_name(bank_type),
			  smca_banks[bank].sysfs_id);
	return buf_mcatype;
}
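
/*
 * Examples of the resulting sysfs names (assuming a typical Zen layout):
 * a single instance of an SMCA bank type keeps its short name, e.g.
 * "insn_fetch"; multiple instances get an appended sysfs id, e.g.
 * "umc_0"/"umc_1"; legacy bank 4 blocks use bank4_names(), e.g. "dram".
 */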

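/*
 * Allocate one threshold_block per valid threshold block (MISC0..MISC4)
 * of @bank on @cpu, recursing through the block chain discovered via
 * get_block_address(). Additional blocks are linked onto the first
 * block's ->miscj list.
 */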
static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
				     unsigned int block, u32 address)
{
	struct threshold_block *b = NULL;
	u32 low, high;
	int err;

	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
		return 0;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;
	}

	if (!(high & MASK_CNTP_HI)  ||
	     (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block		= block;
	b->bank			= bank;
	b->cpu			= cpu;
	b->address		= address;
	b->interrupt_enable	= 0;
	b->interrupt_capable	= lvt_interrupt_supported(bank, high);
	b->threshold_limit	= THRESHOLD_MAX;

	if (b->interrupt_capable) {
		threshold_ktype.default_attrs[2] = &interrupt_enable.attr;
		b->interrupt_enable = 1;
	} else {
		threshold_ktype.default_attrs[2] = NULL;
	}

	INIT_LIST_HEAD(&b->miscj);

	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	} else {
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
	}

	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
				   per_cpu(threshold_banks, cpu)[bank]->kobj,
				   get_name(bank, b));
	if (err)
		goto out_free;
recurse:
	address = get_block_address(cpu, address, low, high, bank, ++block);
	if (!address)
		return 0;

	err = allocate_threshold_blocks(cpu, bank, block, address);
	if (err)
		goto out_free;

	if (b)
		kobject_uevent(&b->kobj, KOBJ_ADD);

	return err;

out_free:
	if (b) {
		kobject_put(&b->kobj);
		list_del(&b->miscj);
		kfree(b);
	}
	return err;
}

static int __threshold_add_blocks(struct threshold_bank *b)
{
	struct list_head *head = &b->blocks->miscj;
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	int err = 0;

	err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
	if (err)
		return err;

	list_for_each_entry_safe(pos, tmp, head, miscj) {

		err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
		if (err) {
			list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
				kobject_del(&pos->kobj);

			return err;
		}
	}
	return err;
}

static int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	struct device *dev = per_cpu(mce_device, cpu);
	struct amd_northbridge *nb = NULL;
	struct threshold_bank *b = NULL;
	const char *name = get_name(bank, NULL);
	int err = 0;

	if (!dev)
		return -ENODEV;

	if (is_shared_bank(bank)) {
		nb = node_to_amd_nb(amd_get_nb_id(cpu));

		/* threshold descriptor already initialized on this node? */
		if (nb && nb->bank4) {
			/* yes, use it */
			b = nb->bank4;
			err = kobject_add(b->kobj, &dev->kobj, name);
			if (err)
				goto out;

			per_cpu(threshold_banks, cpu)[bank] = b;
			refcount_inc(&b->cpus);

			err = __threshold_add_blocks(b);

			goto out;
		}
	}

	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}

	b->kobj = kobject_create_and_add(name, &dev->kobj);
	if (!b->kobj) {
		err = -EINVAL;
		goto out_free;
	}

	per_cpu(threshold_banks, cpu)[bank] = b;

	if (is_shared_bank(bank)) {
		refcount_set(&b->cpus, 1);

		/* nb is already initialized, see above */
		if (nb) {
			WARN_ON(nb->bank4);
			nb->bank4 = b;
		}
	}

	err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
	if (!err)
		goto out;

 out_free:
	kfree(b);

 out:
	return err;
}
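
/*
 * Resulting sysfs layout sketch (assuming the standard MCE device
 * hierarchy): each bank created here appears under
 * /sys/devices/system/machinecheck/machinecheck<cpu>/<bank_name>/, with
 * one kobject per threshold block underneath exposing error_count,
 * threshold_limit and, when supported, interrupt_enable.
 */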

static void deallocate_threshold_block(unsigned int cpu,
						 unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_put(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}

static void __threshold_remove_blocks(struct threshold_bank *b)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;

	kobject_del(b->kobj);

	list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
		kobject_del(&pos->kobj);
}

static void threshold_remove_bank(unsigned int cpu, int bank)
{
	struct amd_northbridge *nb;
	struct threshold_bank *b;

	b = per_cpu(threshold_banks, cpu)[bank];
	if (!b)
		return;

	if (!b->blocks)
		goto free_out;

	if (is_shared_bank(bank)) {
		if (!refcount_dec_and_test(&b->cpus)) {
			__threshold_remove_blocks(b);
			per_cpu(threshold_banks, cpu)[bank] = NULL;
			return;
		} else {
			/*
			 * the last CPU on this node using the shared bank is
			 * going away, remove that bank now.
			 */
			nb = node_to_amd_nb(amd_get_nb_id(cpu));
			nb->bank4 = NULL;
		}
	}

	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_del(b->kobj);
	kobject_put(b->kobj);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}

int mce_threshold_remove_device(unsigned int cpu)
{
	unsigned int bank;

	if (!thresholding_en)
		return 0;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		threshold_remove_bank(cpu, bank);
	}
	kfree(per_cpu(threshold_banks, cpu));
	per_cpu(threshold_banks, cpu) = NULL;
	return 0;
}

/* create dir/files for all valid threshold banks */
int mce_threshold_create_device(unsigned int cpu)
{
	unsigned int bank;
	struct threshold_bank **bp;
	int err = 0;

	if (!thresholding_en)
		return 0;

	bp = per_cpu(threshold_banks, cpu);
	if (bp)
		return 0;

	bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
		     GFP_KERNEL);
	if (!bp)
		return -ENOMEM;

	per_cpu(threshold_banks, cpu) = bp;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		err = threshold_create_bank(cpu, bank);
		if (err)
			goto err;
	}
	return err;
err:
	mce_threshold_remove_device(cpu);
	return err;
}

static __init int threshold_init_device(void)
{
	unsigned lcpu = 0;

	if (mce_threshold_vector == amd_threshold_interrupt)
		thresholding_en = true;

	/* to hit CPUs online before the notifier is up */
	for_each_online_cpu(lcpu) {
		int err = mce_threshold_create_device(lcpu);

		if (err)
			return err;
	}

	return 0;
}
/*
 * There are three functions which need to be _initcalled in a logical
 * sequence:
 * 1. xen_late_init_mcelog
 * 2. mcheck_init_device
 * 3. threshold_init_device
 *
 * xen_late_init_mcelog must register xen_mce_chrdev_device before
 * native mce_chrdev_device registration if running under the xen platform;
 *
 * mcheck_init_device should be inited before threshold_init_device to
 * initialize mce_device, otherwise a NULL ptr dereference will cause panic.
 *
 * So we use the following _initcalls:
 * 1. device_initcall(xen_late_init_mcelog);
 * 2. device_initcall_sync(mcheck_init_device);
 * 3. late_initcall(threshold_init_device);
 *
 * When running under xen, the initcall order is 1, 2, 3;
 * on baremetal, we skip 1 and do only 2 and 3.
 */
late_initcall(threshold_init_device);