/*
 *  (c) 2005-2016 Advanced Micro Devices, Inc.
 *  Your use of this code is subject to the terms and conditions of the
 *  GNU general public license version 2. See "COPYING" or
 *  http://www.gnu.org/licenses/gpl.html
 *
 *  Written by Jacob Shin - AMD, Inc.
 *  Maintained by: Borislav Petkov <bp@alien8.de>
 *
 *  All MC4_MISCi registers are shared between cores on a node.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/string.h>

#include <asm/amd_nb.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>

#include "mce-internal.h"

#define NR_BLOCKS         5
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400

/* Deferred error settings */
#define MSR_CU_DEF_ERR		0xC0000410
#define MASK_DEF_LVTOFF		0x000000F0
#define MASK_DEF_INT_TYPE	0x00000006
#define DEF_LVT_OFF		0x2
#define DEF_INT_TYPE_APIC	0x2

/* Scalable MCA: */

/* Threshold LVT offset is at MSR0xC0000410[15:12] */
#define SMCA_THR_LVT_OFF	0xF000

static bool thresholding_en;

static const char * const th_names[] = {
	"load_store",
	"insn_fetch",
	"combined_unit",
	"decode_unit",
	"northbridge",
	"execution_unit",
};

static const char * const smca_umc_block_names[] = {
	"dram_ecc",
	"misc_umc"
};

struct smca_bank_name {
	const char *name;	/* Short name for sysfs */
	const char *long_name;	/* Long name for pretty-printing */
};

static struct smca_bank_name smca_names[] = {
	[SMCA_LS]	= { "load_store",	"Load Store Unit" },
	[SMCA_IF]	= { "insn_fetch",	"Instruction Fetch Unit" },
	[SMCA_L2_CACHE]	= { "l2_cache",		"L2 Cache" },
	[SMCA_DE]	= { "decode_unit",	"Decode Unit" },
	[SMCA_RESERVED]	= { "reserved",		"Reserved" },
	[SMCA_EX]	= { "execution_unit",	"Execution Unit" },
	[SMCA_FP]	= { "floating_point",	"Floating Point Unit" },
	[SMCA_L3_CACHE]	= { "l3_cache",		"L3 Cache" },
	[SMCA_CS]	= { "coherent_slave",	"Coherent Slave" },
	[SMCA_PIE]	= { "pie",		"Power, Interrupts, etc." },
	[SMCA_UMC]	= { "umc",		"Unified Memory Controller" },
	[SMCA_PB]	= { "param_block",	"Parameter Block" },
	[SMCA_PSP]	= { "psp",		"Platform Security Processor" },
	[SMCA_SMU]	= { "smu",		"System Management Unit" },
};

static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
{
	[0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
};

const char *smca_get_name(enum smca_bank_types t)
{
	if (t >= N_SMCA_BANK_TYPES)
		return NULL;

	return smca_names[t].name;
}

const char *smca_get_long_name(enum smca_bank_types t)
{
	if (t >= N_SMCA_BANK_TYPES)
		return NULL;

	return smca_names[t].long_name;
}
EXPORT_SYMBOL_GPL(smca_get_long_name);

static enum smca_bank_types smca_get_bank_type(unsigned int bank)
{
	struct smca_bank *b;

	if (bank >= MAX_NR_BANKS)
		return N_SMCA_BANK_TYPES;

	b = &smca_banks[bank];
	if (!b->hwid)
		return N_SMCA_BANK_TYPES;

	return b->hwid->bank_type;
}

static struct smca_hwid smca_hwid_mcatypes[] = {
	/* { bank_type, hwid_mcatype, xec_bitmap } */

	/* Reserved type */
	{ SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0), 0x0 },

	/* ZN Core (HWID=0xB0) MCA types */
	{ SMCA_LS,	 HWID_MCATYPE(0xB0, 0x0), 0x1FFFEF },
	{ SMCA_IF,	 HWID_MCATYPE(0xB0, 0x1), 0x3FFF },
	{ SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF },
	{ SMCA_DE,	 HWID_MCATYPE(0xB0, 0x3), 0x1FF },
	/* HWID 0xB0 MCATYPE 0x4 is Reserved */
	{ SMCA_EX,	 HWID_MCATYPE(0xB0, 0x5), 0x7FF },
	{ SMCA_FP,	 HWID_MCATYPE(0xB0, 0x6), 0x7F },
	{ SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7), 0xFF },

	/* Data Fabric MCA types */
	{ SMCA_CS,	 HWID_MCATYPE(0x2E, 0x0), 0x1FF },
	{ SMCA_PIE,	 HWID_MCATYPE(0x2E, 0x1), 0xF },

	/* Unified Memory Controller MCA type */
	{ SMCA_UMC,	 HWID_MCATYPE(0x96, 0x0), 0x3F },

	/* Parameter Block MCA type */
	{ SMCA_PB,	 HWID_MCATYPE(0x05, 0x0), 0x1 },

	/* Platform Security Processor MCA type */
	{ SMCA_PSP,	 HWID_MCATYPE(0xFF, 0x0), 0x1 },

	/* System Management Unit MCA type */
	{ SMCA_SMU,	 HWID_MCATYPE(0x01, 0x0), 0x1 },
};
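
/*
 * At boot, smca_configure() reads each bank's MCA_IPID register and matches
 * its {HWID, MCATYPE} pair against the hwid_mcatype values in this table to
 * classify the bank. Banks with no match keep a NULL hwid and are treated
 * as unknown.
 */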

struct smca_bank smca_banks[MAX_NR_BANKS];
EXPORT_SYMBOL_GPL(smca_banks);

/*
 * In SMCA enabled processors, we can have multiple banks for a given IP type.
 * So to define a unique name for each bank, we use a temp c-string to append
 * the MCA_IPID[InstanceId] to type's name in get_name().
 *
 * InstanceId is 32 bits which is 8 characters. Make sure MAX_MCATYPE_NAME_LEN
 * is greater than 8 plus 1 (for underscore) plus length of longest type name.
 */
#define MAX_MCATYPE_NAME_LEN	30
static char buf_mcatype[MAX_MCATYPE_NAME_LEN];

static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
static DEFINE_PER_CPU(unsigned int, bank_map);	/* see which banks are on */

static void amd_threshold_interrupt(void);
static void amd_deferred_error_interrupt(void);

static void default_deferred_error_interrupt(void)
{
	pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR);
}
void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;

static void smca_configure(unsigned int bank, unsigned int cpu)
{
	unsigned int i, hwid_mcatype;
	struct smca_hwid *s_hwid;
	u32 high, low;
	u32 smca_config = MSR_AMD64_SMCA_MCx_CONFIG(bank);

	/* Set appropriate bits in MCA_CONFIG */
	if (!rdmsr_safe(smca_config, &low, &high)) {
		/*
		 * OS is required to set the MCAX bit to acknowledge that it is
		 * now using the new MSR ranges and new registers under each
		 * bank. It also means that the OS will configure deferred
		 * errors in the new MCx_CONFIG register. If the bit is not set,
		 * uncorrectable errors will cause a system panic.
		 *
		 * MCA_CONFIG[MCAX] is bit 32 (bit 0 in the high portion of the MSR).
		 */
		high |= BIT(0);

		/*
		 * SMCA sets the Deferred Error Interrupt type per bank.
		 *
		 * MCA_CONFIG[DeferredIntTypeSupported] is bit 5, and tells us
		 * if the DeferredIntType bit field is available.
		 *
		 * MCA_CONFIG[DeferredIntType] is bits [38:37] ([6:5] in the
		 * high portion of the MSR). OS should set this to 0x1 to enable
		 * APIC based interrupt. First, check that no interrupt has been
		 * set.
		 */
		if ((low & BIT(5)) && !((high >> 5) & 0x3))
			high |= BIT(5);

		wrmsr(smca_config, low, high);
	}

	/* Return early if this bank was already initialized. */
	if (smca_banks[bank].hwid)
		return;

	if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
		pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
		return;
	}

	hwid_mcatype = HWID_MCATYPE(high & MCI_IPID_HWID,
				    (high & MCI_IPID_MCATYPE) >> 16);

	for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) {
		s_hwid = &smca_hwid_mcatypes[i];
		if (hwid_mcatype == s_hwid->hwid_mcatype) {
			smca_banks[bank].hwid = s_hwid;
			smca_banks[bank].id = low;
			smca_banks[bank].sysfs_id = s_hwid->count++;
			break;
		}
	}
}

struct thresh_restart {
	struct threshold_block	*b;
	int			reset;
	int			set_lvt_off;
	int			lvt_off;
	u16			old_limit;
};

static inline bool is_shared_bank(int bank)
{
	/*
	 * Scalable MCA provides for only one core to have access to the MSRs of
	 * a shared bank.
	 */
	if (mce_flags.smca)
		return false;

	/* Bank 4 is for northbridge reporting and is thus shared */
	return (bank == 4);
}

static const char *bank4_names(const struct threshold_block *b)
{
	switch (b->address) {
	/* MSR4_MISC0 */
	case 0x00000413:
		return "dram";

	case 0xc0000408:
		return "ht_links";

	case 0xc0000409:
		return "l3_cache";

	default:
		WARN(1, "Funny MSR: 0x%08x\n", b->address);
		return "";
	}
}


static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
	/*
	 * bank 4 supports APIC LVT interrupts implicitly since forever.
	 */
	if (bank == 4)
		return true;

	/*
	 * IntP: interrupt present; if this bit is set, the thresholding
	 * bank can generate APIC LVT interrupts
	 */
	return msr_high_bits & BIT(28);
}

static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
	int msr = (hi & MASK_LVTOFF_HI) >> 20;

	if (apic < 0) {
		pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
		       b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	if (apic != msr) {
		/*
		 * On SMCA CPUs, LVT offset is programmed at a different MSR, and
		 * the BIOS provides the value. The original field where LVT offset
		 * was set is reserved. Return early here:
		 */
		if (mce_flags.smca)
			return 0;

		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	return 1;
}

/* Reprogram MCx_MISC MSR behind this threshold bank. */
static void threshold_restart_bank(void *_tr)
{
	struct thresh_restart *tr = _tr;
	u32 hi, lo;

	rdmsr(tr->b->address, lo, hi);

	if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
		tr->reset = 1;	/* limit cannot be lower than err count */

	if (tr->reset) {		/* reset err count and overflow bit */
		hi =
		    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
		    (THRESHOLD_MAX - tr->b->threshold_limit);
	} else if (tr->old_limit) {	/* change limit w/o reset */
		int new_count = (hi & THRESHOLD_MAX) +
		    (tr->old_limit - tr->b->threshold_limit);

		hi = (hi & ~MASK_ERR_COUNT_HI) |
		    (new_count & THRESHOLD_MAX);
	}

	/* clear IntType */
	hi &= ~MASK_INT_TYPE_HI;

	if (!tr->b->interrupt_capable)
		goto done;

	if (tr->set_lvt_off) {
		if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
			/* set new lvt offset */
			hi &= ~MASK_LVTOFF_HI;
			hi |= tr->lvt_off << 20;
		}
	}

	if (tr->b->interrupt_enable)
		hi |= INT_TYPE_APIC;

 done:

	hi |= MASK_COUNT_EN_HI;
	wrmsr(tr->b->address, lo, hi);
}
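
/*
 * Note on the counter arithmetic above: the hardware error counter counts
 * up and raises the threshold interrupt when it wraps past THRESHOLD_MAX
 * (0xFFF). To fire after threshold_limit errors, the reset path seeds the
 * count field with THRESHOLD_MAX - threshold_limit. For example, with a
 * limit of 10 the field starts at 0xFF5, so the tenth error pushes it to
 * 0xFFF and sets the overflow bit. show_error_count() below reverses this
 * to report how many errors have been seen so far.
 */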

static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
	struct thresh_restart tr = {
		.b			= b,
		.set_lvt_off		= 1,
		.lvt_off		= offset,
	};

	b->threshold_limit		= THRESHOLD_MAX;
	threshold_restart_bank(&tr);
}

static int setup_APIC_mce_threshold(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

static int setup_APIC_deferred_error(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0;
	int def_offset = -1, def_new;

	if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
		return;

	def_new = (low & MASK_DEF_LVTOFF) >> 4;
	if (!(low & MASK_DEF_LVTOFF)) {
		pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n");
		def_new = DEF_LVT_OFF;
		low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4);
	}

	def_offset = setup_APIC_deferred_error(def_offset, def_new);
	if ((def_offset == def_new) &&
	    (deferred_error_int_vector != amd_deferred_error_interrupt))
		deferred_error_int_vector = amd_deferred_error_interrupt;

	if (!mce_flags.smca)
		low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;

	wrmsr(MSR_CU_DEF_ERR, low, high);
}
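
/*
 * The deferred-error LVT offset lives in MSR_CU_DEF_ERR[7:4]
 * (MASK_DEF_LVTOFF). Firmware is expected to program it to DEF_LVT_OFF
 * (0x2); the code above falls back to that value, with an FW_BUG warning,
 * when the field reads zero.
 */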

static u32 smca_get_block_address(unsigned int bank, unsigned int block)
{
	u32 low, high;
	u32 addr = 0;

	if (smca_get_bank_type(bank) == SMCA_RESERVED)
		return addr;

	if (!block)
		return MSR_AMD64_SMCA_MCx_MISC(bank);

	/* Check our cache first: */
	if (smca_bank_addrs[bank][block] != -1)
		return smca_bank_addrs[bank][block];

	/*
	 * For SMCA enabled processors, BLKPTR field of the first MISC register
	 * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4).
	 */
	if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
		goto out;

	if (!(low & MCI_CONFIG_MCAX))
		goto out;

	if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
	    (low & MASK_BLKPTR_LO))
		addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);

out:
	smca_bank_addrs[bank][block] = addr;
	return addr;
}

static u32 get_block_address(u32 current_addr, u32 low, u32 high,
			     unsigned int bank, unsigned int block)
{
	u32 addr = 0, offset = 0;

	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
		return addr;

	if (mce_flags.smca)
		return smca_get_block_address(bank, block);

	/* Fall back to method we used for older processors: */
	switch (block) {
	case 0:
		addr = msr_ops.misc(bank);
		break;
	case 1:
		offset = ((low & MASK_BLKPTR_LO) >> 21);
		if (offset)
			addr = MCG_XBLK_ADDR + offset;
		break;
	default:
		addr = ++current_addr;
	}
	return addr;
}
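
/*
 * Layout implied by the legacy path above: block 0 lives at MCx_MISC;
 * block 1 lives at MCG_XBLK_ADDR plus an offset derived from the BLKPTR
 * field in MCx_MISC0[31:24]; blocks 2-4 follow contiguously after block 1.
 * A zero return value means "no such block" and terminates the caller's
 * block loop.
 */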

static int
prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
			int offset, u32 misc_high)
{
	unsigned int cpu = smp_processor_id();
	u32 smca_low, smca_high;
	struct threshold_block b;
	int new;

	if (!block)
		per_cpu(bank_map, cpu) |= (1 << bank);

	memset(&b, 0, sizeof(b));
	b.cpu			= cpu;
	b.bank			= bank;
	b.block			= block;
	b.address		= addr;
	b.interrupt_capable	= lvt_interrupt_supported(bank, misc_high);

	if (!b.interrupt_capable)
		goto done;

	b.interrupt_enable = 1;

	if (!mce_flags.smca) {
		new = (misc_high & MASK_LVTOFF_HI) >> 20;
		goto set_offset;
	}

	/* Gather LVT offset for thresholding: */
	if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
		goto out;

	new = (smca_low & SMCA_THR_LVT_OFF) >> 12;

set_offset:
	offset = setup_APIC_mce_threshold(offset, new);

	if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt))
		mce_threshold_vector = amd_threshold_interrupt;

done:
	mce_threshold_block_init(&b, offset);

out:
	return offset;
}
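
/*
 * To summarize the two LVT offset sources used above: on pre-SMCA parts the
 * threshold LVT offset comes from MCx_MISC[23:20] (MASK_LVTOFF_HI), while on
 * SMCA parts the BIOS programs it into MSR 0xC0000410[15:12]
 * (SMCA_THR_LVT_OFF) and the legacy field is reserved.
 */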

/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block, cpu = smp_processor_id();
	int offset = -1;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (mce_flags.smca)
			smca_configure(bank, cpu);

		for (block = 0; block < NR_BLOCKS; ++block) {
			address = get_block_address(address, low, high, bank, block);
			if (!address)
				break;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI))
				continue;

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			offset = prepare_threshold_block(bank, block, address, offset, high);
		}
	}

	if (mce_flags.succor)
		deferred_error_interrupt_enable(c);
}

int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
{
	u64 dram_base_addr, dram_limit_addr, dram_hole_base;
	/* We start from the normalized address */
	u64 ret_addr = norm_addr;

	u32 tmp;

	u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
	u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
	u8 intlv_addr_sel, intlv_addr_bit;
	u8 num_intlv_bits, hashed_bit;
	u8 lgcy_mmio_hole_en, base = 0;
	u8 cs_mask, cs_id = 0;
	bool hash_enabled = false;

	/* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
	if (amd_df_indirect_read(nid, 0, 0x1B4, umc, &tmp))
		goto out_err;

	/* Remove HiAddrOffset from normalized address, if enabled: */
	if (tmp & BIT(0)) {
		u64 hi_addr_offset = (tmp & GENMASK_ULL(31, 20)) << 8;

		if (norm_addr >= hi_addr_offset) {
			ret_addr -= hi_addr_offset;
			base = 1;
		}
	}

	/* Read D18F0x110 (DramBaseAddress). */
	if (amd_df_indirect_read(nid, 0, 0x110 + (8 * base), umc, &tmp))
		goto out_err;

	/* Check if address range is valid. */
	if (!(tmp & BIT(0))) {
		pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
			__func__, tmp);
		goto out_err;
	}

	lgcy_mmio_hole_en = tmp & BIT(1);
	intlv_num_chan	  = (tmp >> 4) & 0xF;
	intlv_addr_sel	  = (tmp >> 8) & 0x7;
	dram_base_addr	  = (tmp & GENMASK_ULL(31, 12)) << 16;

	/* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
	if (intlv_addr_sel > 3) {
		pr_err("%s: Invalid interleave address select %d.\n",
			__func__, intlv_addr_sel);
		goto out_err;
	}

	/* Read D18F0x114 (DramLimitAddress). */
	if (amd_df_indirect_read(nid, 0, 0x114 + (8 * base), umc, &tmp))
		goto out_err;

	intlv_num_sockets = (tmp >> 8) & 0x1;
	intlv_num_dies	  = (tmp >> 10) & 0x3;
	dram_limit_addr	  = ((tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);

	intlv_addr_bit = intlv_addr_sel + 8;

	/* Re-use intlv_num_chan by setting it equal to log2(#channels) */
	switch (intlv_num_chan) {
	case 0:	intlv_num_chan = 0; break;
	case 1: intlv_num_chan = 1; break;
	case 3: intlv_num_chan = 2; break;
	case 5:	intlv_num_chan = 3; break;
	case 7:	intlv_num_chan = 4; break;

	case 8: intlv_num_chan = 1;
		hash_enabled = true;
		break;
	default:
		pr_err("%s: Invalid number of interleaved channels %d.\n",
			__func__, intlv_num_chan);
		goto out_err;
	}

	num_intlv_bits = intlv_num_chan;

	if (intlv_num_dies > 2) {
		pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
			__func__, intlv_num_dies);
		goto out_err;
	}

	num_intlv_bits += intlv_num_dies;

	/* Add a bit if sockets are interleaved. */
	num_intlv_bits += intlv_num_sockets;

	/* Assert num_intlv_bits <= 4 */
	if (num_intlv_bits > 4) {
		pr_err("%s: Invalid interleave bits %d.\n",
			__func__, num_intlv_bits);
		goto out_err;
	}

	if (num_intlv_bits > 0) {
		u64 temp_addr_x, temp_addr_i, temp_addr_y;
		u8 die_id_bit, sock_id_bit, cs_fabric_id;

		/*
		 * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
		 * This is the fabric id for this coherent slave. Use
		 * umc/channel# as instance id of the coherent slave
		 * for FICAA.
		 */
		if (amd_df_indirect_read(nid, 0, 0x50, umc, &tmp))
			goto out_err;

		cs_fabric_id = (tmp >> 8) & 0xFF;
		die_id_bit   = 0;

		/* If interleaved over more than 1 channel: */
		if (intlv_num_chan) {
			die_id_bit = intlv_num_chan;
			cs_mask	   = (1 << die_id_bit) - 1;
			cs_id	   = cs_fabric_id & cs_mask;
		}

		sock_id_bit = die_id_bit;

		/* Read D18F1x208 (SystemFabricIdMask). */
		if (intlv_num_dies || intlv_num_sockets)
			if (amd_df_indirect_read(nid, 1, 0x208, umc, &tmp))
				goto out_err;

		/* If interleaved over more than 1 die. */
		if (intlv_num_dies) {
			sock_id_bit  = die_id_bit + intlv_num_dies;
			die_id_shift = (tmp >> 24) & 0xF;
			die_id_mask  = (tmp >> 8) & 0xFF;

			cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
		}

		/* If interleaved over more than 1 socket. */
		if (intlv_num_sockets) {
			socket_id_shift	= (tmp >> 28) & 0xF;
			socket_id_mask	= (tmp >> 16) & 0xFF;

			cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
		}

		/*
		 * The pre-interleaved address consists of XXXXXXIIIYYYYY
		 * where III is the ID for this CS, and XXXXXXYYYYY are the
		 * address bits from the post-interleaved address.
		 * "num_intlv_bits" has been calculated to tell us how many "I"
		 * bits there are. "intlv_addr_bit" tells us how many "Y" bits
		 * there are (where "I" starts).
		 */
		temp_addr_y = ret_addr & GENMASK_ULL(intlv_addr_bit-1, 0);
		temp_addr_i = (cs_id << intlv_addr_bit);
		temp_addr_x = (ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
		ret_addr    = temp_addr_x | temp_addr_i | temp_addr_y;
	}
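
	/*
	 * Worked example of the de-interleaving above, assuming a two-channel
	 * interleave at bit 8 (num_intlv_bits = 1, intlv_addr_bit = 8) and
	 * cs_id = 1: a normalized address of 0x1234 keeps bits [7:0] (0x34)
	 * as "Y", gains the CS ID at bit 8 as "I", and shifts the remaining
	 * bits up by one, giving 0x2534 before the DRAM base is added back.
	 */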

	/* Add dram base address */
	ret_addr += dram_base_addr;

	/* If legacy MMIO hole enabled */
	if (lgcy_mmio_hole_en) {
		if (amd_df_indirect_read(nid, 0, 0x104, umc, &tmp))
			goto out_err;

		dram_hole_base = tmp & GENMASK(31, 24);
		if (ret_addr >= dram_hole_base)
			ret_addr += (BIT_ULL(32) - dram_hole_base);
	}

	if (hash_enabled) {
		/* Save some parentheses and grab ls-bit at the end. */
		hashed_bit =	(ret_addr >> 12) ^
				(ret_addr >> 18) ^
				(ret_addr >> 21) ^
				(ret_addr >> 30) ^
				cs_id;

		hashed_bit &= BIT(0);

		if (hashed_bit != ((ret_addr >> intlv_addr_bit) & BIT(0)))
			ret_addr ^= BIT(intlv_addr_bit);
	}

	/* Is the calculated system address above the DRAM limit address? */
	if (ret_addr > dram_limit_addr)
		goto out_err;

	*sys_addr = ret_addr;
	return 0;

out_err:
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(umc_normaddr_to_sysaddr);

bool amd_mce_is_memory_error(struct mce *m)
{
	/* ErrCodeExt[20:16] */
	u8 xec = (m->status >> 16) & 0x1f;

	if (mce_flags.smca)
		return smca_get_bank_type(m->bank) == SMCA_UMC && xec == 0x0;

	return m->bank == 4 && xec == 0x8;
}
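
/*
 * In other words: on SMCA systems a memory error is a UMC bank reporting
 * extended error code 0, and on legacy systems it is northbridge bank 4
 * reporting extended error code 0x8; both values correspond to DRAM ECC
 * errors on those parts.
 */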

static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
{
	struct mce m;

	mce_setup(&m);

	m.status = status;
	m.misc   = misc;
	m.bank   = bank;
	m.tsc	 = rdtsc();

	if (m.status & MCI_STATUS_ADDRV) {
		m.addr = addr;

		/*
		 * Extract [55:<lsb>] where lsb is the least significant
		 * *valid* bit of the address bits.
		 */
		if (mce_flags.smca) {
			u8 lsb = (m.addr >> 56) & 0x3f;

			m.addr &= GENMASK_ULL(55, lsb);
		}
	}

	if (mce_flags.smca) {
		rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m.ipid);

		if (m.status & MCI_STATUS_SYNDV)
			rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m.synd);
	}

	mce_log(&m);
}
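
/*
 * Example of the address masking above: MCA_ADDR[61:56] holds the LSB
 * field. If it reads 12, the error address is only valid at 4KB
 * granularity, so bits [11:0] and the non-address bits [63:56] are
 * cleared, leaving bits [55:12].
 */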

asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
{
	entering_irq();
	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
	inc_irq_stat(irq_deferred_error_count);
	deferred_error_int_vector();
	trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
	exiting_ack_irq();
}

/*
 * Returns true if the logged error is deferred, false otherwise.
 */
static inline bool
_log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc)
{
	u64 status, addr = 0;

	rdmsrl(msr_stat, status);
	if (!(status & MCI_STATUS_VAL))
		return false;

	if (status & MCI_STATUS_ADDRV)
		rdmsrl(msr_addr, addr);

	__log_error(bank, status, addr, misc);

	wrmsrl(msr_stat, 0);

	return status & MCI_STATUS_DEFERRED;
}

/*
 * We have three scenarios for checking for Deferred errors:
 *
 * 1) Non-SMCA systems check MCA_STATUS and log the error if found.
 * 2) SMCA systems check MCA_STATUS. If the error is found then log it and
 *    also clear MCA_DESTAT.
 * 3) SMCA systems check MCA_DESTAT, if the error was not found in
 *    MCA_STATUS, and log it.
 */
static void log_error_deferred(unsigned int bank)
{
	bool defrd;

	defrd = _log_error_bank(bank, msr_ops.status(bank),
					msr_ops.addr(bank), 0);

	if (!mce_flags.smca)
		return;

	/* Clear MCA_DESTAT if we logged the deferred error from MCA_STATUS. */
	if (defrd) {
		wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0);
		return;
	}

	/*
	 * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check
	 * for a valid error.
	 */
	_log_error_bank(bank, MSR_AMD64_SMCA_MCx_DESTAT(bank),
			      MSR_AMD64_SMCA_MCx_DEADDR(bank), 0);
}

/* APIC interrupt handler for deferred errors */
static void amd_deferred_error_interrupt(void)
{
	unsigned int bank;

	for (bank = 0; bank < mca_cfg.banks; ++bank)
		log_error_deferred(bank);
}

static void log_error_thresholding(unsigned int bank, u64 misc)
{
	_log_error_bank(bank, msr_ops.status(bank), msr_ops.addr(bank), misc);
}

static void log_and_reset_block(struct threshold_block *block)
{
	struct thresh_restart tr;
	u32 low = 0, high = 0;

	if (!block)
		return;

	if (rdmsr_safe(block->address, &low, &high))
		return;

	if (!(high & MASK_OVERFLOW_HI))
		return;

	/* Log the MCE which caused the threshold event. */
	log_error_thresholding(block->bank, ((u64)high << 32) | low);

	/* Reset threshold block after logging error. */
	memset(&tr, 0, sizeof(tr));
	tr.b = block;
	threshold_restart_bank(&tr);
}

/*
 * Threshold interrupt handler will service THRESHOLD_APIC_VECTOR. The interrupt
 * goes off when error_count reaches threshold_limit.
 */
static void amd_threshold_interrupt(void)
{
	struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL;
	unsigned int bank, cpu = smp_processor_id();

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;

		first_block = per_cpu(threshold_banks, cpu)[bank]->blocks;
		if (!first_block)
			continue;

		/*
		 * The first block is also the head of the list. Check it first
		 * before iterating over the rest.
		 */
		log_and_reset_block(first_block);
		list_for_each_entry_safe(block, tmp, &first_block->miscj, miscj)
			log_and_reset_block(block);
	}
}

/*
 * Sysfs Interface
 */

struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long) b->name);		\
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (!b->interrupt_capable)
		return -EINVAL;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	b->interrupt_enable = !!new;

	memset(&tr, 0, sizeof(tr));
	tr.b		= b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	memset(&tr, 0, sizeof(tr));
	tr.old_limit = b->threshold_limit;
	b->threshold_limit = new;
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
	u32 lo, hi;

	rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);

	return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
				     (THRESHOLD_MAX - b->threshold_limit)));
}

static struct threshold_attr error_count = {
	.attr = {.name = __stringify(error_count), .mode = 0444 },
	.show = show_error_count,
};

#define RW_ATTR(val)							\
static struct threshold_attr val = {					\
	.attr	= {.name = __stringify(val), .mode = 0644 },		\
	.show	= show_## val,						\
	.store	= store_## val,						\
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);

static struct attribute *default_attrs[] = {
	&threshold_limit.attr,
	&error_count.attr,
	NULL,	/* possibly interrupt_enable if supported, see below */
	NULL,
};

#define to_block(k)	container_of(k, struct threshold_block, kobj)
#define to_attr(a)	container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;

	return ret;
}

static const struct sysfs_ops threshold_ops = {
	.show			= show,
	.store			= store,
};

static struct kobj_type threshold_ktype = {
	.sysfs_ops		= &threshold_ops,
	.default_attrs		= default_attrs,
};

static const char *get_name(unsigned int bank, struct threshold_block *b)
{
	enum smca_bank_types bank_type;

	if (!mce_flags.smca) {
		if (b && bank == 4)
			return bank4_names(b);

		return th_names[bank];
	}

	bank_type = smca_get_bank_type(bank);
	if (bank_type >= N_SMCA_BANK_TYPES)
		return NULL;

	if (b && bank_type == SMCA_UMC) {
		if (b->block < ARRAY_SIZE(smca_umc_block_names))
			return smca_umc_block_names[b->block];
		return NULL;
	}

	if (smca_banks[bank].hwid->count == 1)
		return smca_get_name(bank_type);

	snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN,
		 "%s_%x", smca_get_name(bank_type),
			  smca_banks[bank].sysfs_id);
	return buf_mcatype;
}
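
/*
 * For example, an SMCA system with two UMC banks exposes them in sysfs as
 * "umc_0" and "umc_1" (sysfs_id is the per-type counter assigned in
 * smca_configure()), while a type with a single instance keeps its plain
 * name, e.g. "l3_cache".
 */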

static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
				     unsigned int block, u32 address)
{
	struct threshold_block *b = NULL;
	u32 low, high;
	int err;

	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
		return 0;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;
	}

	if (!(high & MASK_CNTP_HI)  ||
	     (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block		= block;
	b->bank			= bank;
	b->cpu			= cpu;
	b->address		= address;
	b->interrupt_enable	= 0;
	b->interrupt_capable	= lvt_interrupt_supported(bank, high);
	b->threshold_limit	= THRESHOLD_MAX;

	if (b->interrupt_capable) {
		threshold_ktype.default_attrs[2] = &interrupt_enable.attr;
		b->interrupt_enable = 1;
	} else {
		threshold_ktype.default_attrs[2] = NULL;
	}

	INIT_LIST_HEAD(&b->miscj);

	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	} else {
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
	}

	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
				   per_cpu(threshold_banks, cpu)[bank]->kobj,
				   get_name(bank, b));
	if (err)
		goto out_free;
recurse:
	address = get_block_address(address, low, high, bank, ++block);
	if (!address)
		return 0;

	err = allocate_threshold_blocks(cpu, bank, block, address);
	if (err)
		goto out_free;

	if (b)
		kobject_uevent(&b->kobj, KOBJ_ADD);

	return err;

out_free:
	if (b) {
		kobject_put(&b->kobj);
		list_del(&b->miscj);
		kfree(b);
	}
	return err;
}

static int __threshold_add_blocks(struct threshold_bank *b)
{
	struct list_head *head = &b->blocks->miscj;
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	int err = 0;

	err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
	if (err)
		return err;

	list_for_each_entry_safe(pos, tmp, head, miscj) {

		err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
		if (err) {
			list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
				kobject_del(&pos->kobj);

			return err;
		}
	}
	return err;
}

static int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	struct device *dev = per_cpu(mce_device, cpu);
	struct amd_northbridge *nb = NULL;
	struct threshold_bank *b = NULL;
	const char *name = get_name(bank, NULL);
	int err = 0;

	if (!dev)
		return -ENODEV;

	if (is_shared_bank(bank)) {
		nb = node_to_amd_nb(amd_get_nb_id(cpu));

		/* threshold descriptor already initialized on this node? */
		if (nb && nb->bank4) {
			/* yes, use it */
			b = nb->bank4;
			err = kobject_add(b->kobj, &dev->kobj, name);
			if (err)
				goto out;

			per_cpu(threshold_banks, cpu)[bank] = b;
			refcount_inc(&b->cpus);

			err = __threshold_add_blocks(b);

			goto out;
		}
	}

	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}

	b->kobj = kobject_create_and_add(name, &dev->kobj);
	if (!b->kobj) {
		err = -EINVAL;
		goto out_free;
	}

	per_cpu(threshold_banks, cpu)[bank] = b;

	if (is_shared_bank(bank)) {
		refcount_set(&b->cpus, 1);

		/* nb is already initialized, see above */
		if (nb) {
			WARN_ON(nb->bank4);
			nb->bank4 = b;
		}
	}

	err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
	if (!err)
		goto out;

 out_free:
	kfree(b);

 out:
	return err;
}
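
/*
 * Note the shared-bank handling above: on pre-SMCA parts bank 4 has a single
 * set of MSRs per northbridge, so the first CPU on a node allocates the
 * threshold_bank and publishes it in nb->bank4; later CPUs on the same node
 * register the existing bank under their own device and bump the b->cpus
 * refcount instead of allocating a new one.
 */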

static void deallocate_threshold_block(unsigned int cpu,
						 unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_put(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}

static void __threshold_remove_blocks(struct threshold_bank *b)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;

	kobject_del(b->kobj);

	list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
		kobject_del(&pos->kobj);
}

static void threshold_remove_bank(unsigned int cpu, int bank)
{
	struct amd_northbridge *nb;
	struct threshold_bank *b;

	b = per_cpu(threshold_banks, cpu)[bank];
	if (!b)
		return;

	if (!b->blocks)
		goto free_out;

	if (is_shared_bank(bank)) {
		if (!refcount_dec_and_test(&b->cpus)) {
			__threshold_remove_blocks(b);
			per_cpu(threshold_banks, cpu)[bank] = NULL;
			return;
		} else {
			/*
			 * the last CPU on this node using the shared bank is
			 * going away, remove that bank now.
			 */
			nb = node_to_amd_nb(amd_get_nb_id(cpu));
			nb->bank4 = NULL;
		}
	}

	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_del(b->kobj);
	kobject_put(b->kobj);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}

int mce_threshold_remove_device(unsigned int cpu)
{
	unsigned int bank;

	if (!thresholding_en)
		return 0;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		threshold_remove_bank(cpu, bank);
	}
	kfree(per_cpu(threshold_banks, cpu));
	per_cpu(threshold_banks, cpu) = NULL;
	return 0;
}

/* create dir/files for all valid threshold banks */
int mce_threshold_create_device(unsigned int cpu)
{
	unsigned int bank;
	struct threshold_bank **bp;
	int err = 0;

	if (!thresholding_en)
		return 0;

	bp = per_cpu(threshold_banks, cpu);
	if (bp)
		return 0;

	bp = kcalloc(mca_cfg.banks, sizeof(struct threshold_bank *),
		     GFP_KERNEL);
	if (!bp)
		return -ENOMEM;

	per_cpu(threshold_banks, cpu) = bp;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		err = threshold_create_bank(cpu, bank);
		if (err)
			goto err;
	}
	return err;
err:
	mce_threshold_remove_device(cpu);
	return err;
}

static __init int threshold_init_device(void)
{
	unsigned lcpu = 0;

	if (mce_threshold_vector == amd_threshold_interrupt)
		thresholding_en = true;

	/* to hit CPUs online before the notifier is up */
	for_each_online_cpu(lcpu) {
		int err = mce_threshold_create_device(lcpu);

		if (err)
			return err;
	}

	return 0;
}
/*
 * There are three funcs which need to be _initcalled in a logical sequence:
 * 1. xen_late_init_mcelog
 * 2. mcheck_init_device
 * 3. threshold_init_device
 *
 * xen_late_init_mcelog must register xen_mce_chrdev_device before
 * native mce_chrdev_device registration if running under the xen platform;
 *
 * mcheck_init_device should be inited before threshold_init_device to
 * initialize mce_device, otherwise a NULL ptr dereference will cause a panic.
 *
 * so we use the following _initcalls:
 * 1. device_initcall(xen_late_init_mcelog);
 * 2. device_initcall_sync(mcheck_init_device);
 * 3. late_initcall(threshold_init_device);
 *
 * when running under xen, the initcall order is 1, 2, 3;
 * on baremetal, we skip 1 and do only 2 and 3.
 */
late_initcall(threshold_init_device);