/*
 *  (c) 2005-2016 Advanced Micro Devices, Inc.
 *  Your use of this code is subject to the terms and conditions of the
 *  GNU general public license version 2. See "COPYING" or
 *  http://www.gnu.org/licenses/gpl.html
 *
 *  Written by Jacob Shin - AMD, Inc.
 *  Maintained by: Borislav Petkov <bp@alien8.de>
 *
 *  All MC4_MISCi registers are shared between cores on a node.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/string.h>

#include <asm/amd_nb.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>

#define NR_BLOCKS         5
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400

/* Deferred error settings */
#define MSR_CU_DEF_ERR		0xC0000410
#define MASK_DEF_LVTOFF		0x000000F0
#define MASK_DEF_INT_TYPE	0x00000006
#define DEF_LVT_OFF		0x2
#define DEF_INT_TYPE_APIC	0x2

/* Scalable MCA: */

/* Threshold LVT offset is at MSR0xC0000410[15:12] */
#define SMCA_THR_LVT_OFF	0xF000

static bool thresholding_en;

static const char * const th_names[] = {
	"load_store",
	"insn_fetch",
	"combined_unit",
	"decode_unit",
	"northbridge",
	"execution_unit",
};

static const char * const smca_umc_block_names[] = {
	"dram_ecc",
	"misc_umc"
};

struct smca_bank_name {
	const char *name;	/* Short name for sysfs */
	const char *long_name;	/* Long name for pretty-printing */
};

static struct smca_bank_name smca_names[] = {
	[SMCA_LS]	= { "load_store",	"Load Store Unit" },
	[SMCA_IF]	= { "insn_fetch",	"Instruction Fetch Unit" },
	[SMCA_L2_CACHE]	= { "l2_cache",		"L2 Cache" },
	[SMCA_DE]	= { "decode_unit",	"Decode Unit" },
	[SMCA_EX]	= { "execution_unit",	"Execution Unit" },
	[SMCA_FP]	= { "floating_point",	"Floating Point Unit" },
	[SMCA_L3_CACHE]	= { "l3_cache",		"L3 Cache" },
	[SMCA_CS]	= { "coherent_slave",	"Coherent Slave" },
	[SMCA_PIE]	= { "pie",		"Power, Interrupts, etc." },
	[SMCA_UMC]	= { "umc",		"Unified Memory Controller" },
	[SMCA_PB]	= { "param_block",	"Parameter Block" },
	[SMCA_PSP]	= { "psp",		"Platform Security Processor" },
	[SMCA_SMU]	= { "smu",		"System Management Unit" },
};

const char *smca_get_name(enum smca_bank_types t)
{
	if (t >= N_SMCA_BANK_TYPES)
		return NULL;

	return smca_names[t].name;
}

const char *smca_get_long_name(enum smca_bank_types t)
{
	if (t >= N_SMCA_BANK_TYPES)
		return NULL;

	return smca_names[t].long_name;
}
EXPORT_SYMBOL_GPL(smca_get_long_name);

static struct smca_hwid smca_hwid_mcatypes[] = {
	/* { bank_type, hwid_mcatype, xec_bitmap } */

	/* ZN Core (HWID=0xB0) MCA types */
	{ SMCA_LS,	 HWID_MCATYPE(0xB0, 0x0), 0x1FFFEF },
	{ SMCA_IF,	 HWID_MCATYPE(0xB0, 0x1), 0x3FFF },
	{ SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF },
	{ SMCA_DE,	 HWID_MCATYPE(0xB0, 0x3), 0x1FF },
	/* HWID 0xB0 MCATYPE 0x4 is Reserved */
	{ SMCA_EX,	 HWID_MCATYPE(0xB0, 0x5), 0x7FF },
	{ SMCA_FP,	 HWID_MCATYPE(0xB0, 0x6), 0x7F },
	{ SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7), 0xFF },

	/* Data Fabric MCA types */
	{ SMCA_CS,	 HWID_MCATYPE(0x2E, 0x0), 0x1FF },
	{ SMCA_PIE,	 HWID_MCATYPE(0x2E, 0x1), 0xF },

	/* Unified Memory Controller MCA type */
	{ SMCA_UMC,	 HWID_MCATYPE(0x96, 0x0), 0x3F },

	/* Parameter Block MCA type */
	{ SMCA_PB,	 HWID_MCATYPE(0x05, 0x0), 0x1 },

	/* Platform Security Processor MCA type */
	{ SMCA_PSP,	 HWID_MCATYPE(0xFF, 0x0), 0x1 },

	/* System Management Unit MCA type */
	{ SMCA_SMU,	 HWID_MCATYPE(0x01, 0x0), 0x1 },
};

struct smca_bank smca_banks[MAX_NR_BANKS];
EXPORT_SYMBOL_GPL(smca_banks);

/*
 * In SMCA enabled processors, we can have multiple banks for a given IP type.
 * So to define a unique name for each bank, we use a temp c-string to append
 * the MCA_IPID[InstanceId] to type's name in get_name().
 *
 * InstanceId is 32 bits which is 8 characters. Make sure MAX_MCATYPE_NAME_LEN
 * is greater than 8 plus 1 (for underscore) plus length of longest type name.
 */
#define MAX_MCATYPE_NAME_LEN	30
static char buf_mcatype[MAX_MCATYPE_NAME_LEN];

static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
static DEFINE_PER_CPU(unsigned int, bank_map);	/* see which banks are on */

static void amd_threshold_interrupt(void);
static void amd_deferred_error_interrupt(void);

static void default_deferred_error_interrupt(void)
{
	pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR);
}
void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;

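/*
 * Read this bank's MCA_IPID register, match its hardware ID and McaType
 * against the known SMCA types and cache the result in smca_banks[].
 */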
static void get_smca_bank_info(unsigned int bank)
{
	unsigned int i, hwid_mcatype, cpu = smp_processor_id();
	struct smca_hwid *s_hwid;
	u32 high, instance_id;

	/* Collect bank_info using CPU 0 for now. */
	if (cpu)
		return;

	if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &instance_id, &high)) {
		pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
		return;
	}

	hwid_mcatype = HWID_MCATYPE(high & MCI_IPID_HWID,
				    (high & MCI_IPID_MCATYPE) >> 16);

	for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) {
		s_hwid = &smca_hwid_mcatypes[i];
		if (hwid_mcatype == s_hwid->hwid_mcatype) {

			WARN(smca_banks[bank].hwid,
			     "Bank %s already initialized!\n",
			     smca_get_name(s_hwid->bank_type));

			smca_banks[bank].hwid = s_hwid;
			smca_banks[bank].id = instance_id;
			smca_banks[bank].sysfs_id = s_hwid->count++;
			break;
		}
	}
}

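/* Argument block for (re)programming a threshold block's MCx_MISC MSR. */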
struct thresh_restart {
	struct threshold_block	*b;
	int			reset;
	int			set_lvt_off;
	int			lvt_off;
	u16			old_limit;
};

static inline bool is_shared_bank(int bank)
{
	/*
	 * Scalable MCA provides for only one core to have access to the MSRs of
	 * a shared bank.
	 */
	if (mce_flags.smca)
		return false;

	/* Bank 4 is for northbridge reporting and is thus shared */
	return (bank == 4);
}

static const char *bank4_names(const struct threshold_block *b)
{
	switch (b->address) {
	/* MSR4_MISC0 */
	case 0x00000413:
		return "dram";

	case 0xc0000408:
		return "ht_links";

	case 0xc0000409:
		return "l3_cache";

	default:
		WARN(1, "Funny MSR: 0x%08x\n", b->address);
		return "";
	}
};


static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
	/*
	 * bank 4 supports APIC LVT interrupts implicitly since forever.
	 */
	if (bank == 4)
		return true;

	/*
	 * IntP: interrupt present; if this bit is set, the thresholding
	 * bank can generate APIC LVT interrupts
	 */
	return msr_high_bits & BIT(28);
}

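/*
 * Verify that the LVT offset programmed in MCx_MISC matches the APIC LVT
 * offset we want to use; complain about firmware bugs otherwise.
 */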
static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
	int msr = (hi & MASK_LVTOFF_HI) >> 20;

	if (apic < 0) {
		pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
		       b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	if (apic != msr) {
		/*
		 * On SMCA CPUs, LVT offset is programmed at a different MSR, and
		 * the BIOS provides the value. The original field where LVT offset
		 * was set is reserved. Return early here:
		 */
		if (mce_flags.smca)
			return 0;

		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	return 1;
};

/* Reprogram MCx_MISC MSR behind this threshold bank. */
static void threshold_restart_bank(void *_tr)
{
	struct thresh_restart *tr = _tr;
	u32 hi, lo;

	rdmsr(tr->b->address, lo, hi);

	if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
		tr->reset = 1;	/* limit cannot be lower than err count */

	if (tr->reset) {		/* reset err count and overflow bit */
		hi =
		    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
		    (THRESHOLD_MAX - tr->b->threshold_limit);
	} else if (tr->old_limit) {	/* change limit w/o reset */
		int new_count = (hi & THRESHOLD_MAX) +
		    (tr->old_limit - tr->b->threshold_limit);

		hi = (hi & ~MASK_ERR_COUNT_HI) |
		    (new_count & THRESHOLD_MAX);
	}

	/* clear IntType */
	hi &= ~MASK_INT_TYPE_HI;

	if (!tr->b->interrupt_capable)
		goto done;

	if (tr->set_lvt_off) {
		if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
			/* set new lvt offset */
			hi &= ~MASK_LVTOFF_HI;
			hi |= tr->lvt_off << 20;
		}
	}

	if (tr->b->interrupt_enable)
		hi |= INT_TYPE_APIC;

 done:

	hi |= MASK_COUNT_EN_HI;
	wrmsr(tr->b->address, lo, hi);
}

static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
	struct thresh_restart tr = {
		.b			= b,
		.set_lvt_off		= 1,
		.lvt_off		= offset,
	};

	b->threshold_limit		= THRESHOLD_MAX;
	threshold_restart_bank(&tr);
};

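/* Reserve the APIC extended LVT offset @new for the threshold vector. */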
static int setup_APIC_mce_threshold(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

static int setup_APIC_deferred_error(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0;
	int def_offset = -1, def_new;

	if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
		return;

	def_new = (low & MASK_DEF_LVTOFF) >> 4;
	if (!(low & MASK_DEF_LVTOFF)) {
		pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n");
		def_new = DEF_LVT_OFF;
		low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4);
	}

	def_offset = setup_APIC_deferred_error(def_offset, def_new);
	if ((def_offset == def_new) &&
	    (deferred_error_int_vector != amd_deferred_error_interrupt))
		deferred_error_int_vector = amd_deferred_error_interrupt;

	low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;
	wrmsr(MSR_CU_DEF_ERR, low, high);
}

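/*
 * Return the MSR address of the MISC register belonging to this bank/block
 * pair, or 0 if the block does not exist.
 */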
static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high,
			     unsigned int bank, unsigned int block)
{
	u32 addr = 0, offset = 0;

	if (mce_flags.smca) {
		if (!block) {
			addr = MSR_AMD64_SMCA_MCx_MISC(bank);
		} else {
			/*
			 * For SMCA enabled processors, BLKPTR field of the
			 * first MISC register (MCx_MISC0) indicates presence of
			 * additional MISC register set (MISC1-4).
			 */
			u32 low, high;

			if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
				return addr;

			if (!(low & MCI_CONFIG_MCAX))
				return addr;

			if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
			    (low & MASK_BLKPTR_LO))
				addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
		}
		return addr;
	}

	/* Fall back to method we used for older processors: */
	switch (block) {
	case 0:
		addr = msr_ops.misc(bank);
		break;
	case 1:
		offset = ((low & MASK_BLKPTR_LO) >> 21);
		if (offset)
			addr = MCG_XBLK_ADDR + offset;
		break;
	default:
		addr = ++current_addr;
	}
	return addr;
}

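/*
 * Mark the bank in bank_map, enable the SMCA MCAX/deferred error interrupt
 * bits where available, reserve the threshold LVT offset and initialize the
 * threshold block.
 */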
static int
prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
			int offset, u32 misc_high)
{
	unsigned int cpu = smp_processor_id();
	u32 smca_low, smca_high, smca_addr;
	struct threshold_block b;
	int new;

	if (!block)
		per_cpu(bank_map, cpu) |= (1 << bank);

	memset(&b, 0, sizeof(b));
	b.cpu			= cpu;
	b.bank			= bank;
	b.block			= block;
	b.address		= addr;
	b.interrupt_capable	= lvt_interrupt_supported(bank, misc_high);

	if (!b.interrupt_capable)
		goto done;

	b.interrupt_enable = 1;

	if (!mce_flags.smca) {
		new = (misc_high & MASK_LVTOFF_HI) >> 20;
		goto set_offset;
	}

	smca_addr = MSR_AMD64_SMCA_MCx_CONFIG(bank);

	if (!rdmsr_safe(smca_addr, &smca_low, &smca_high)) {
		/*
		 * OS is required to set the MCAX bit to acknowledge that it is
		 * now using the new MSR ranges and new registers under each
		 * bank. It also means that the OS will configure deferred
		 * errors in the new MCx_CONFIG register. If the bit is not set,
		 * uncorrectable errors will cause a system panic.
		 *
		 * MCA_CONFIG[MCAX] is bit 32 (0 in the high portion of the MSR.)
		 */
		smca_high |= BIT(0);

		/*
		 * SMCA sets the Deferred Error Interrupt type per bank.
		 *
		 * MCA_CONFIG[DeferredIntTypeSupported] is bit 5, and tells us
		 * if the DeferredIntType bit field is available.
		 *
		 * MCA_CONFIG[DeferredIntType] is bits [38:37] ([6:5] in the
		 * high portion of the MSR). OS should set this to 0x1 to enable
		 * APIC based interrupt. First, check that no interrupt has been
		 * set.
		 */
		if ((smca_low & BIT(5)) && !((smca_high >> 5) & 0x3))
			smca_high |= BIT(5);

		wrmsr(smca_addr, smca_low, smca_high);
	}

	/* Gather LVT offset for thresholding: */
	if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
		goto out;

	new = (smca_low & SMCA_THR_LVT_OFF) >> 12;

set_offset:
	offset = setup_APIC_mce_threshold(offset, new);

	if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt))
		mce_threshold_vector = amd_threshold_interrupt;

done:
	mce_threshold_block_init(&b, offset);

out:
	return offset;
}

/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block, cpu = smp_processor_id();
	int offset = -1;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (mce_flags.smca)
			get_smca_bank_info(bank);

		for (block = 0; block < NR_BLOCKS; ++block) {
			address = get_block_address(cpu, address, low, high, bank, block);
			if (!address)
				break;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI))
				continue;

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			offset = prepare_threshold_block(bank, block, address, offset, high);
		}
	}

	if (mce_flags.succor)
		deferred_error_interrupt_enable(c);
}

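/*
 * Translate an address normalized by a UMC back to a system physical address
 * by undoing channel/die/socket interleaving, the DRAM base/offset and the
 * legacy MMIO hole.
 */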
int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
{
	u64 dram_base_addr, dram_limit_addr, dram_hole_base;
	/* We start from the normalized address */
	u64 ret_addr = norm_addr;

	u32 tmp;

	u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
	u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
	u8 intlv_addr_sel, intlv_addr_bit;
	u8 num_intlv_bits, hashed_bit;
	u8 lgcy_mmio_hole_en, base = 0;
	u8 cs_mask, cs_id = 0;
	bool hash_enabled = false;

	/* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
	if (amd_df_indirect_read(nid, 0, 0x1B4, umc, &tmp))
		goto out_err;

	/* Remove HiAddrOffset from normalized address, if enabled: */
	if (tmp & BIT(0)) {
		u64 hi_addr_offset = (tmp & GENMASK_ULL(31, 20)) << 8;

		if (norm_addr >= hi_addr_offset) {
			ret_addr -= hi_addr_offset;
			base = 1;
		}
	}

	/* Read D18F0x110 (DramBaseAddress). */
	if (amd_df_indirect_read(nid, 0, 0x110 + (8 * base), umc, &tmp))
		goto out_err;

	/* Check if address range is valid. */
	if (!(tmp & BIT(0))) {
		pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
			__func__, tmp);
		goto out_err;
	}

	lgcy_mmio_hole_en = tmp & BIT(1);
	intlv_num_chan	  = (tmp >> 4) & 0xF;
	intlv_addr_sel	  = (tmp >> 8) & 0x7;
	dram_base_addr	  = (tmp & GENMASK_ULL(31, 12)) << 16;

	/* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
	if (intlv_addr_sel > 3) {
		pr_err("%s: Invalid interleave address select %d.\n",
			__func__, intlv_addr_sel);
		goto out_err;
	}

	/* Read D18F0x114 (DramLimitAddress). */
	if (amd_df_indirect_read(nid, 0, 0x114 + (8 * base), umc, &tmp))
		goto out_err;

	intlv_num_sockets = (tmp >> 8) & 0x1;
	intlv_num_dies	  = (tmp >> 10) & 0x3;
	dram_limit_addr	  = ((tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);

	intlv_addr_bit = intlv_addr_sel + 8;

	/* Re-use intlv_num_chan by setting it equal to log2(#channels) */
	switch (intlv_num_chan) {
	case 0:	intlv_num_chan = 0; break;
	case 1: intlv_num_chan = 1; break;
	case 3: intlv_num_chan = 2; break;
	case 5:	intlv_num_chan = 3; break;
	case 7:	intlv_num_chan = 4; break;

	case 8: intlv_num_chan = 1;
		hash_enabled = true;
		break;
	default:
		pr_err("%s: Invalid number of interleaved channels %d.\n",
			__func__, intlv_num_chan);
		goto out_err;
	}

	num_intlv_bits = intlv_num_chan;

	if (intlv_num_dies > 2) {
		pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
			__func__, intlv_num_dies);
		goto out_err;
	}

	num_intlv_bits += intlv_num_dies;

	/* Add a bit if sockets are interleaved. */
	num_intlv_bits += intlv_num_sockets;

	/* Assert num_intlv_bits <= 4 */
	if (num_intlv_bits > 4) {
		pr_err("%s: Invalid interleave bits %d.\n",
			__func__, num_intlv_bits);
		goto out_err;
	}

	if (num_intlv_bits > 0) {
		u64 temp_addr_x, temp_addr_i, temp_addr_y;
		u8 die_id_bit, sock_id_bit, cs_fabric_id;

		/*
		 * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
		 * This is the fabric id for this coherent slave. Use
		 * umc/channel# as instance id of the coherent slave
		 * for FICAA.
		 */
		if (amd_df_indirect_read(nid, 0, 0x50, umc, &tmp))
			goto out_err;

		cs_fabric_id = (tmp >> 8) & 0xFF;
		die_id_bit   = 0;

		/* If interleaved over more than 1 channel: */
		if (intlv_num_chan) {
			die_id_bit = intlv_num_chan;
			cs_mask	   = (1 << die_id_bit) - 1;
			cs_id	   = cs_fabric_id & cs_mask;
		}

		sock_id_bit = die_id_bit;

		/* Read D18F1x208 (SystemFabricIdMask). */
		if (intlv_num_dies || intlv_num_sockets)
			if (amd_df_indirect_read(nid, 1, 0x208, umc, &tmp))
				goto out_err;

		/* If interleaved over more than 1 die. */
		if (intlv_num_dies) {
			sock_id_bit  = die_id_bit + intlv_num_dies;
			die_id_shift = (tmp >> 24) & 0xF;
			die_id_mask  = (tmp >> 8) & 0xFF;

			cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
		}

		/* If interleaved over more than 1 socket. */
		if (intlv_num_sockets) {
			socket_id_shift	= (tmp >> 28) & 0xF;
			socket_id_mask	= (tmp >> 16) & 0xFF;

			cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
		}

		/*
		 * The pre-interleaved address consists of XXXXXXIIIYYYYY
		 * where III is the ID for this CS, and XXXXXXYYYYY are the
		 * address bits from the post-interleaved address.
		 * "num_intlv_bits" has been calculated to tell us how many "I"
		 * bits there are. "intlv_addr_bit" tells us how many "Y" bits
		 * there are (where "I" starts).
		 */
		temp_addr_y = ret_addr & GENMASK_ULL(intlv_addr_bit-1, 0);
		temp_addr_i = (cs_id << intlv_addr_bit);
		temp_addr_x = (ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
		ret_addr    = temp_addr_x | temp_addr_i | temp_addr_y;
	}

	/* Add dram base address */
	ret_addr += dram_base_addr;

	/* If legacy MMIO hole enabled */
	if (lgcy_mmio_hole_en) {
		if (amd_df_indirect_read(nid, 0, 0x104, umc, &tmp))
			goto out_err;

		dram_hole_base = tmp & GENMASK(31, 24);
		if (ret_addr >= dram_hole_base)
			ret_addr += (BIT_ULL(32) - dram_hole_base);
	}

	if (hash_enabled) {
		/* Save some parentheses and grab ls-bit at the end. */
		hashed_bit =	(ret_addr >> 12) ^
				(ret_addr >> 18) ^
				(ret_addr >> 21) ^
				(ret_addr >> 30) ^
				cs_id;

		hashed_bit &= BIT(0);

		if (hashed_bit != ((ret_addr >> intlv_addr_bit) & BIT(0)))
			ret_addr ^= BIT(intlv_addr_bit);
	}

	/* Is the calculated system address above the DRAM limit address? */
	if (ret_addr > dram_limit_addr)
		goto out_err;

	*sys_addr = ret_addr;
	return 0;

out_err:
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(umc_normaddr_to_sysaddr);

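/* Assemble a struct mce from the given register contents and log it. */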
static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
{
	struct mce m;

	mce_setup(&m);

	m.status = status;
	m.misc   = misc;
	m.bank   = bank;
	m.tsc	 = rdtsc();

	if (m.status & MCI_STATUS_ADDRV) {
		m.addr = addr;

		/*
		 * Extract [55:<lsb>] where lsb is the least significant
		 * *valid* bit of the address bits.
		 */
		if (mce_flags.smca) {
			u8 lsb = (m.addr >> 56) & 0x3f;

			m.addr &= GENMASK_ULL(55, lsb);
		}
	}

	if (mce_flags.smca) {
		rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m.ipid);

		if (m.status & MCI_STATUS_SYNDV)
			rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m.synd);
	}

	mce_log(&m);
}

static inline void __smp_deferred_error_interrupt(void)
{
	inc_irq_stat(irq_deferred_error_count);
	deferred_error_int_vector();
}

asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
{
	entering_irq();
	__smp_deferred_error_interrupt();
	exiting_ack_irq();
}

asmlinkage __visible void __irq_entry smp_trace_deferred_error_interrupt(void)
{
	entering_irq();
	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
	__smp_deferred_error_interrupt();
	trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
	exiting_ack_irq();
}

/*
 * Returns true if the logged error is deferred. False, otherwise.
 */
static inline bool
_log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc)
{
	u64 status, addr = 0;

	rdmsrl(msr_stat, status);
	if (!(status & MCI_STATUS_VAL))
		return false;

	if (status & MCI_STATUS_ADDRV)
		rdmsrl(msr_addr, addr);

	__log_error(bank, status, addr, misc);

	wrmsrl(msr_stat, 0);

	return status & MCI_STATUS_DEFERRED;
}

/*
 * We have three scenarios for checking for Deferred errors:
 *
 * 1) Non-SMCA systems check MCA_STATUS and log error if found.
 * 2) SMCA systems check MCA_STATUS. If error is found then log it and also
 *    clear MCA_DESTAT.
 * 3) SMCA systems check MCA_DESTAT, if error was not found in MCA_STATUS, and
 *    log it.
 */
static void log_error_deferred(unsigned int bank)
{
	bool defrd;

	defrd = _log_error_bank(bank, msr_ops.status(bank),
					msr_ops.addr(bank), 0);

	if (!mce_flags.smca)
		return;

	/* Clear MCA_DESTAT if we logged the deferred error from MCA_STATUS. */
	if (defrd) {
		wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0);
		return;
	}

	/*
	 * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check
	 * for a valid error.
	 */
	_log_error_bank(bank, MSR_AMD64_SMCA_MCx_DESTAT(bank),
			      MSR_AMD64_SMCA_MCx_DEADDR(bank), 0);
}

/* APIC interrupt handler for deferred errors */
static void amd_deferred_error_interrupt(void)
{
	unsigned int bank;

	for (bank = 0; bank < mca_cfg.banks; ++bank)
		log_error_deferred(bank);
}

static void log_error_thresholding(unsigned int bank, u64 misc)
{
	_log_error_bank(bank, msr_ops.status(bank), msr_ops.addr(bank), misc);
}

/*
 * Threshold interrupt handler will service THRESHOLD_APIC_VECTOR. The interrupt
 * goes off when error_count reaches threshold_limit.
 */
static void amd_threshold_interrupt(void)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block, cpu = smp_processor_id();
	struct thresh_restart tr;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		for (block = 0; block < NR_BLOCKS; ++block) {
			address = get_block_address(cpu, address, low, high, bank, block);
			if (!address)
				break;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;
			}

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			if (!(high & MASK_OVERFLOW_HI))
				continue;

			/* Log the MCE which caused the threshold event. */
			log_error_thresholding(bank, ((u64)high << 32) | low);

			/* Reset threshold block after logging error. */
			memset(&tr, 0, sizeof(tr));
			tr.b = &per_cpu(threshold_banks, cpu)[bank]->blocks[block];
			threshold_restart_bank(&tr);
		}
	}
}

/*
 * Sysfs Interface
 */

struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long) b->name);		\
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (!b->interrupt_capable)
		return -EINVAL;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	b->interrupt_enable = !!new;

	memset(&tr, 0, sizeof(tr));
	tr.b		= b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	memset(&tr, 0, sizeof(tr));
	tr.old_limit = b->threshold_limit;
	b->threshold_limit = new;
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
	u32 lo, hi;

	rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);

	return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
				     (THRESHOLD_MAX - b->threshold_limit)));
}

static struct threshold_attr error_count = {
	.attr = {.name = __stringify(error_count), .mode = 0444 },
	.show = show_error_count,
};

#define RW_ATTR(val)							\
static struct threshold_attr val = {					\
	.attr	= {.name = __stringify(val), .mode = 0644 },		\
	.show	= show_## val,						\
	.store	= store_## val,						\
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);

static struct attribute *default_attrs[] = {
	&threshold_limit.attr,
	&error_count.attr,
	NULL,	/* possibly interrupt_enable if supported, see below */
	NULL,
};

#define to_block(k)	container_of(k, struct threshold_block, kobj)
#define to_attr(a)	container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;

	return ret;
}

static const struct sysfs_ops threshold_ops = {
	.show			= show,
	.store			= store,
};

static struct kobj_type threshold_ktype = {
	.sysfs_ops		= &threshold_ops,
	.default_attrs		= default_attrs,
};

static const char *get_name(unsigned int bank, struct threshold_block *b)
{
	unsigned int bank_type;

	if (!mce_flags.smca) {
		if (b && bank == 4)
			return bank4_names(b);

		return th_names[bank];
	}

	if (!smca_banks[bank].hwid)
		return NULL;

	bank_type = smca_banks[bank].hwid->bank_type;

	if (b && bank_type == SMCA_UMC) {
		if (b->block < ARRAY_SIZE(smca_umc_block_names))
			return smca_umc_block_names[b->block];
		return NULL;
	}

	if (smca_banks[bank].hwid->count == 1)
		return smca_get_name(bank_type);

	snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN,
		 "%s_%x", smca_get_name(bank_type),
			  smca_banks[bank].sysfs_id);
	return buf_mcatype;
}

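/*
 * Allocate and register a threshold block kobject, then recurse into the
 * bank's remaining blocks.
 */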
static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
				     unsigned int block, u32 address)
{
	struct threshold_block *b = NULL;
	u32 low, high;
	int err;

	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
		return 0;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;
	}

	if (!(high & MASK_CNTP_HI)  ||
	     (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block		= block;
	b->bank			= bank;
	b->cpu			= cpu;
	b->address		= address;
	b->interrupt_enable	= 0;
	b->interrupt_capable	= lvt_interrupt_supported(bank, high);
	b->threshold_limit	= THRESHOLD_MAX;

	if (b->interrupt_capable) {
		threshold_ktype.default_attrs[2] = &interrupt_enable.attr;
		b->interrupt_enable = 1;
	} else {
		threshold_ktype.default_attrs[2] = NULL;
	}

	INIT_LIST_HEAD(&b->miscj);

	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	} else {
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
	}

	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
				   per_cpu(threshold_banks, cpu)[bank]->kobj,
				   get_name(bank, b));
	if (err)
		goto out_free;
recurse:
	address = get_block_address(cpu, address, low, high, bank, ++block);
	if (!address)
		return 0;

	err = allocate_threshold_blocks(cpu, bank, block, address);
	if (err)
		goto out_free;

	if (b)
		kobject_uevent(&b->kobj, KOBJ_ADD);

	return err;

out_free:
	if (b) {
		kobject_put(&b->kobj);
		list_del(&b->miscj);
		kfree(b);
	}
	return err;
}

static int __threshold_add_blocks(struct threshold_bank *b)
{
	struct list_head *head = &b->blocks->miscj;
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	int err = 0;

	err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
	if (err)
		return err;

	list_for_each_entry_safe(pos, tmp, head, miscj) {

		err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
		if (err) {
			list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
				kobject_del(&pos->kobj);

			return err;
		}
	}
	return err;
}

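/*
 * Create the kobject hierarchy for one bank on @cpu, reusing the node's
 * shared bank4 descriptor where applicable.
 */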
static int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	struct device *dev = per_cpu(mce_device, cpu);
	struct amd_northbridge *nb = NULL;
	struct threshold_bank *b = NULL;
	const char *name = get_name(bank, NULL);
	int err = 0;

	if (!dev)
		return -ENODEV;

	if (is_shared_bank(bank)) {
		nb = node_to_amd_nb(amd_get_nb_id(cpu));

		/* threshold descriptor already initialized on this node? */
		if (nb && nb->bank4) {
			/* yes, use it */
			b = nb->bank4;
			err = kobject_add(b->kobj, &dev->kobj, name);
			if (err)
				goto out;

			per_cpu(threshold_banks, cpu)[bank] = b;
			refcount_inc(&b->cpus);

			err = __threshold_add_blocks(b);

			goto out;
		}
	}

	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}

	b->kobj = kobject_create_and_add(name, &dev->kobj);
	if (!b->kobj) {
		err = -EINVAL;
		goto out_free;
	}

	per_cpu(threshold_banks, cpu)[bank] = b;

	if (is_shared_bank(bank)) {
		refcount_set(&b->cpus, 1);

		/* nb is already initialized, see above */
		if (nb) {
			WARN_ON(nb->bank4);
			nb->bank4 = b;
		}
	}

	err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
	if (!err)
		goto out;

 out_free:
	kfree(b);

 out:
	return err;
}

static void deallocate_threshold_block(unsigned int cpu,
						 unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_put(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}

static void __threshold_remove_blocks(struct threshold_bank *b)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;

	kobject_del(b->kobj);

	list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
		kobject_del(&pos->kobj);
}

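/*
 * Remove one bank's kobjects; for the shared bank4, drop the refcount and
 * free it only when the last CPU on the node goes away.
 */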
static void threshold_remove_bank(unsigned int cpu, int bank)
{
	struct amd_northbridge *nb;
	struct threshold_bank *b;

	b = per_cpu(threshold_banks, cpu)[bank];
	if (!b)
		return;

	if (!b->blocks)
		goto free_out;

	if (is_shared_bank(bank)) {
		if (!refcount_dec_and_test(&b->cpus)) {
			__threshold_remove_blocks(b);
			per_cpu(threshold_banks, cpu)[bank] = NULL;
			return;
		} else {
			/*
			 * the last CPU on this node using the shared bank is
			 * going away, remove that bank now.
			 */
			nb = node_to_amd_nb(amd_get_nb_id(cpu));
			nb->bank4 = NULL;
		}
	}

	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_del(b->kobj);
	kobject_put(b->kobj);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}

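/* Tear down the thresholding bank hierarchy for this CPU. */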
int mce_threshold_remove_device(unsigned int cpu)
{
	unsigned int bank;

	if (!thresholding_en)
		return 0;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		threshold_remove_bank(cpu, bank);
	}
	kfree(per_cpu(threshold_banks, cpu));
	per_cpu(threshold_banks, cpu) = NULL;
	return 0;
}

/* create dir/files for all valid threshold banks */
int mce_threshold_create_device(unsigned int cpu)
{
	unsigned int bank;
	struct threshold_bank **bp;
	int err = 0;

	if (!thresholding_en)
		return 0;

	bp = per_cpu(threshold_banks, cpu);
	if (bp)
		return 0;

	bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
		     GFP_KERNEL);
	if (!bp)
		return -ENOMEM;

	per_cpu(threshold_banks, cpu) = bp;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		err = threshold_create_bank(cpu, bank);
		if (err)
			goto err;
	}
	return err;
err:
	mce_threshold_remove_device(cpu);
	return err;
}

static __init int threshold_init_device(void)
{
	unsigned lcpu = 0;

	if (mce_threshold_vector == amd_threshold_interrupt)
		thresholding_en = true;

	/* to hit CPUs online before the notifier is up */
	for_each_online_cpu(lcpu) {
		int err = mce_threshold_create_device(lcpu);

		if (err)
			return err;
	}

	return 0;
}
/*
 * there are 3 funcs which need to be _initcalled in a logic sequence:
 * 1. xen_late_init_mcelog
 * 2. mcheck_init_device
 * 3. threshold_init_device
 *
 * xen_late_init_mcelog must register xen_mce_chrdev_device before
 * native mce_chrdev_device registration if running under xen platform;
 *
 * mcheck_init_device should be inited before threshold_init_device to
 * initialize mce_device, otherwise a NULL ptr dereference will cause panic.
 *
 * so we use following _initcalls
 * 1. device_initcall(xen_late_init_mcelog);
 * 2. device_initcall_sync(mcheck_init_device);
 * 3. late_initcall(threshold_init_device);
 *
 * when running under xen, the initcall order is 1,2,3;
 * on baremetal, we skip 1 and we do only 2 and 3.
 */
late_initcall(threshold_init_device);