/*
 *  (c) 2005-2016 Advanced Micro Devices, Inc.
 *  Your use of this code is subject to the terms and conditions of the
 *  GNU general public license version 2. See "COPYING" or
 *  http://www.gnu.org/licenses/gpl.html
 *
 *  Written by Jacob Shin - AMD, Inc.
 *  Maintained by: Borislav Petkov <bp@alien8.de>
 *
 *  All MC4_MISCi registers are shared between cores on a node.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/string.h>

#include <asm/amd_nb.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>

#include "mce-internal.h"

#define NR_BLOCKS         5
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400

/* Deferred error settings */
#define MSR_CU_DEF_ERR		0xC0000410
#define MASK_DEF_LVTOFF		0x000000F0
#define MASK_DEF_INT_TYPE	0x00000006
#define DEF_LVT_OFF		0x2
#define DEF_INT_TYPE_APIC	0x2

/* Scalable MCA: */

/* Threshold LVT offset is at MSR0xC0000410[15:12] */
#define SMCA_THR_LVT_OFF	0xF000

static bool thresholding_irq_en;

static const char * const th_names[] = {
	"load_store",
	"insn_fetch",
	"combined_unit",
	"decode_unit",
	"northbridge",
	"execution_unit",
};

static const char * const smca_umc_block_names[] = {
	"dram_ecc",
	"misc_umc"
};

struct smca_bank_name {
	const char *name;	/* Short name for sysfs */
	const char *long_name;	/* Long name for pretty-printing */
};

static struct smca_bank_name smca_names[] = {
	[SMCA_LS]	= { "load_store",	"Load Store Unit" },
	[SMCA_IF]	= { "insn_fetch",	"Instruction Fetch Unit" },
	[SMCA_L2_CACHE]	= { "l2_cache",		"L2 Cache" },
	[SMCA_DE]	= { "decode_unit",	"Decode Unit" },
	[SMCA_RESERVED]	= { "reserved",		"Reserved" },
	[SMCA_EX]	= { "execution_unit",	"Execution Unit" },
	[SMCA_FP]	= { "floating_point",	"Floating Point Unit" },
	[SMCA_L3_CACHE]	= { "l3_cache",		"L3 Cache" },
	[SMCA_CS]	= { "coherent_slave",	"Coherent Slave" },
	[SMCA_PIE]	= { "pie",		"Power, Interrupts, etc." },
	[SMCA_UMC]	= { "umc",		"Unified Memory Controller" },
	[SMCA_PB]	= { "param_block",	"Parameter Block" },
	[SMCA_PSP]	= { "psp",		"Platform Security Processor" },
	[SMCA_SMU]	= { "smu",		"System Management Unit" },
};

static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
{
	[0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
};

static const char *smca_get_name(enum smca_bank_types t)
{
	if (t >= N_SMCA_BANK_TYPES)
		return NULL;

	return smca_names[t].name;
}

const char *smca_get_long_name(enum smca_bank_types t)
{
	if (t >= N_SMCA_BANK_TYPES)
		return NULL;

	return smca_names[t].long_name;
}
EXPORT_SYMBOL_GPL(smca_get_long_name);

static enum smca_bank_types smca_get_bank_type(unsigned int bank)
{
	struct smca_bank *b;

	if (bank >= MAX_NR_BANKS)
		return N_SMCA_BANK_TYPES;

	b = &smca_banks[bank];
	if (!b->hwid)
		return N_SMCA_BANK_TYPES;

	return b->hwid->bank_type;
}

static struct smca_hwid smca_hwid_mcatypes[] = {
	/* { bank_type, hwid_mcatype, xec_bitmap } */

	/* Reserved type */
	{ SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0), 0x0 },

	/* ZN Core (HWID=0xB0) MCA types */
	{ SMCA_LS,	 HWID_MCATYPE(0xB0, 0x0), 0x1FFFEF },
	{ SMCA_IF,	 HWID_MCATYPE(0xB0, 0x1), 0x3FFF },
	{ SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF },
	{ SMCA_DE,	 HWID_MCATYPE(0xB0, 0x3), 0x1FF },
	/* HWID 0xB0 MCATYPE 0x4 is Reserved */
	{ SMCA_EX,	 HWID_MCATYPE(0xB0, 0x5), 0x7FF },
	{ SMCA_FP,	 HWID_MCATYPE(0xB0, 0x6), 0x7F },
	{ SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7), 0xFF },

	/* Data Fabric MCA types */
	{ SMCA_CS,	 HWID_MCATYPE(0x2E, 0x0), 0x1FF },
	{ SMCA_PIE,	 HWID_MCATYPE(0x2E, 0x1), 0xF },

	/* Unified Memory Controller MCA type */
	{ SMCA_UMC,	 HWID_MCATYPE(0x96, 0x0), 0x3F },

	/* Parameter Block MCA type */
	{ SMCA_PB,	 HWID_MCATYPE(0x05, 0x0), 0x1 },

	/* Platform Security Processor MCA type */
	{ SMCA_PSP,	 HWID_MCATYPE(0xFF, 0x0), 0x1 },

	/* System Management Unit MCA type */
	{ SMCA_SMU,	 HWID_MCATYPE(0x01, 0x0), 0x1 },
};
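
/*
 * HWID_MCATYPE() (from <asm/mce.h>) packs the pair as (hwid << 16) | mcatype.
 * Worked example: an Instruction Fetch bank whose MCA_IPID reports
 * HardwareID=0xB0 and McaType=0x1 matches HWID_MCATYPE(0xB0, 0x1) ==
 * 0x00B00001 in the table above; smca_configure() performs this lookup.
 */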

struct smca_bank smca_banks[MAX_NR_BANKS];
EXPORT_SYMBOL_GPL(smca_banks);

/*
 * In SMCA enabled processors, we can have multiple banks for a given IP type.
 * So to define a unique name for each bank, we use a temp c-string to append
 * the MCA_IPID[InstanceId] to the type's name in get_name().
 *
 * InstanceId is 32 bits which is 8 characters. Make sure MAX_MCATYPE_NAME_LEN
 * is greater than 8 plus 1 (for underscore) plus length of longest type name.
 */
#define MAX_MCATYPE_NAME_LEN	30
static char buf_mcatype[MAX_MCATYPE_NAME_LEN];
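
/*
 * For example, a system exposing two UMC banks gets the sysfs names "umc_0"
 * and "umc_1": get_name() below combines the "umc" short name from
 * smca_names[] with the per-type sysfs_id handed out in smca_configure().
 */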

static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
static DEFINE_PER_CPU(unsigned int, bank_map);	/* see which banks are on */

static void amd_threshold_interrupt(void);
static void amd_deferred_error_interrupt(void);

static void default_deferred_error_interrupt(void)
{
	pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR);
}
void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;

static void smca_configure(unsigned int bank, unsigned int cpu)
{
	unsigned int i, hwid_mcatype;
	struct smca_hwid *s_hwid;
	u32 high, low;
	u32 smca_config = MSR_AMD64_SMCA_MCx_CONFIG(bank);

	/* Set appropriate bits in MCA_CONFIG */
	if (!rdmsr_safe(smca_config, &low, &high)) {
		/*
		 * OS is required to set the MCAX bit to acknowledge that it is
		 * now using the new MSR ranges and new registers under each
		 * bank. It also means that the OS will configure deferred
		 * errors in the new MCx_CONFIG register. If the bit is not set,
		 * uncorrectable errors will cause a system panic.
		 *
		 * MCA_CONFIG[MCAX] is bit 32 (bit 0 in the high portion of the MSR).
		 */
		high |= BIT(0);

		/*
		 * SMCA sets the Deferred Error Interrupt type per bank.
		 *
		 * MCA_CONFIG[DeferredIntTypeSupported] is bit 5, and tells us
		 * if the DeferredIntType bit field is available.
		 *
		 * MCA_CONFIG[DeferredIntType] is bits [38:37] ([6:5] in the
		 * high portion of the MSR). OS should set this to 0x1 to enable
		 * APIC based interrupt. First, check that no interrupt has been
		 * set.
		 */
		if ((low & BIT(5)) && !((high >> 5) & 0x3))
			high |= BIT(5);

		wrmsr(smca_config, low, high);
	}

	/* Return early if this bank was already initialized. */
	if (smca_banks[bank].hwid)
		return;

	if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
		pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
		return;
	}

	hwid_mcatype = HWID_MCATYPE(high & MCI_IPID_HWID,
				    (high & MCI_IPID_MCATYPE) >> 16);

	for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) {
		s_hwid = &smca_hwid_mcatypes[i];
		if (hwid_mcatype == s_hwid->hwid_mcatype) {
			smca_banks[bank].hwid = s_hwid;
			smca_banks[bank].id = low;
			smca_banks[bank].sysfs_id = s_hwid->count++;
			break;
		}
	}
}

struct thresh_restart {
	struct threshold_block	*b;
	int			reset;
	int			set_lvt_off;
	int			lvt_off;
	u16			old_limit;
};

static inline bool is_shared_bank(int bank)
{
	/*
	 * Scalable MCA provides for only one core to have access to the MSRs of
	 * a shared bank.
	 */
	if (mce_flags.smca)
		return false;

	/* Bank 4 is for northbridge reporting and is thus shared */
	return (bank == 4);
}

static const char *bank4_names(const struct threshold_block *b)
{
	switch (b->address) {
	/* MSR4_MISC0 */
	case 0x00000413:
		return "dram";

	case 0xc0000408:
		return "ht_links";

	case 0xc0000409:
		return "l3_cache";

	default:
		WARN(1, "Funny MSR: 0x%08x\n", b->address);
		return "";
	}
}

static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
	/*
	 * bank 4 supports APIC LVT interrupts implicitly since forever.
	 */
	if (bank == 4)
		return true;

	/*
	 * IntP: interrupt present; if this bit is set, the thresholding
	 * bank can generate APIC LVT interrupts
	 */
	return msr_high_bits & BIT(28);
}

static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
	int msr = (hi & MASK_LVTOFF_HI) >> 20;

	if (apic < 0) {
		pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
		       b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	if (apic != msr) {
		/*
		 * On SMCA CPUs, LVT offset is programmed at a different MSR, and
		 * the BIOS provides the value. The original field where LVT offset
		 * was set is reserved. Return early here:
		 */
		if (mce_flags.smca)
			return 0;

		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	return 1;
}

/* Reprogram MCx_MISC MSR behind this threshold bank. */
static void threshold_restart_bank(void *_tr)
{
	struct thresh_restart *tr = _tr;
	u32 hi, lo;

	rdmsr(tr->b->address, lo, hi);

	if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
		tr->reset = 1;	/* limit cannot be lower than err count */

	if (tr->reset) {		/* reset err count and overflow bit */
		hi =
		    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
		    (THRESHOLD_MAX - tr->b->threshold_limit);
	} else if (tr->old_limit) {	/* change limit w/o reset */
		int new_count = (hi & THRESHOLD_MAX) +
		    (tr->old_limit - tr->b->threshold_limit);

		hi = (hi & ~MASK_ERR_COUNT_HI) |
		    (new_count & THRESHOLD_MAX);
	}

	/* clear IntType */
	hi &= ~MASK_INT_TYPE_HI;

	if (!tr->b->interrupt_capable)
		goto done;

	if (tr->set_lvt_off) {
		if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
			/* set new lvt offset */
			hi &= ~MASK_LVTOFF_HI;
			hi |= tr->lvt_off << 20;
		}
	}

	if (tr->b->interrupt_enable)
		hi |= INT_TYPE_APIC;

 done:

	hi |= MASK_COUNT_EN_HI;
	wrmsr(tr->b->address, lo, hi);
}
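
/*
 * Worked example of the counter encoding handled above: the error count in
 * MASK_ERR_COUNT_HI counts *up*, and the bank signals (and sets
 * MASK_OVERFLOW_HI) once the counter passes THRESHOLD_MAX (0xFFF). The reset
 * path therefore seeds it with THRESHOLD_MAX - threshold_limit, e.g.
 * 0xFFF - 10 = 0xFF5 for a limit of 10, so the tenth error trips the
 * threshold. show_error_count() below subtracts the same bias when reporting
 * the count via sysfs.
 */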

static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
	struct thresh_restart tr = {
		.b			= b,
		.set_lvt_off		= 1,
		.lvt_off		= offset,
	};

	b->threshold_limit		= THRESHOLD_MAX;
	threshold_restart_bank(&tr);
}

static int setup_APIC_mce_threshold(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

static int setup_APIC_deferred_error(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0;
	int def_offset = -1, def_new;

	if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
		return;

	def_new = (low & MASK_DEF_LVTOFF) >> 4;
	if (!(low & MASK_DEF_LVTOFF)) {
		pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n");
		def_new = DEF_LVT_OFF;
		low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4);
	}

	def_offset = setup_APIC_deferred_error(def_offset, def_new);
	if ((def_offset == def_new) &&
	    (deferred_error_int_vector != amd_deferred_error_interrupt))
		deferred_error_int_vector = amd_deferred_error_interrupt;

	if (!mce_flags.smca)
		low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;

	wrmsr(MSR_CU_DEF_ERR, low, high);
}

static u32 smca_get_block_address(unsigned int bank, unsigned int block)
{
	u32 low, high;
	u32 addr = 0;

	if (smca_get_bank_type(bank) == SMCA_RESERVED)
		return addr;

	if (!block)
		return MSR_AMD64_SMCA_MCx_MISC(bank);

	/* Check our cache first: */
	if (smca_bank_addrs[bank][block] != -1)
		return smca_bank_addrs[bank][block];

	/*
	 * For SMCA enabled processors, the BLKPTR field of the first MISC
	 * register (MCx_MISC0) indicates the presence of an additional set of
	 * MISC registers (MISC1-4).
	 */
	if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
		goto out;

	if (!(low & MCI_CONFIG_MCAX))
		goto out;

	if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
	    (low & MASK_BLKPTR_LO))
		addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);

out:
	smca_bank_addrs[bank][block] = addr;
	return addr;
}

static u32 get_block_address(u32 current_addr, u32 low, u32 high,
			     unsigned int bank, unsigned int block)
{
	u32 addr = 0, offset = 0;

	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
		return addr;

	if (mce_flags.smca)
		return smca_get_block_address(bank, block);

	/* Fall back to method we used for older processors: */
	switch (block) {
	case 0:
		addr = msr_ops.misc(bank);
		break;
	case 1:
		offset = ((low & MASK_BLKPTR_LO) >> 21);
		if (offset)
			addr = MCG_XBLK_ADDR + offset;
		break;
	default:
		addr = ++current_addr;
	}
	return addr;
}
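
/*
 * Worked example for the legacy path above: block 0 lives at MCx_MISC
 * (msr_ops.misc(bank)); for block 1, the BLKPTR field (MASK_BLKPTR_LO, bits
 * [31:24]) is shifted right by 21, i.e. multiplied by 8, and added to
 * MCG_XBLK_ADDR. A BLKPTR value of 1 thus points block 1 at
 * 0xC0000400 + 8 = 0xc0000408, the "ht_links" MSR known to bank4_names().
 * Blocks 2-4 simply follow at consecutive MSR addresses.
 */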

static int
prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
			int offset, u32 misc_high)
{
	unsigned int cpu = smp_processor_id();
	u32 smca_low, smca_high;
	struct threshold_block b;
	int new;

	if (!block)
		per_cpu(bank_map, cpu) |= (1 << bank);

	memset(&b, 0, sizeof(b));
	b.cpu			= cpu;
	b.bank			= bank;
	b.block			= block;
	b.address		= addr;
	b.interrupt_capable	= lvt_interrupt_supported(bank, misc_high);

	if (!b.interrupt_capable)
		goto done;

	b.interrupt_enable = 1;

	if (!mce_flags.smca) {
		new = (misc_high & MASK_LVTOFF_HI) >> 20;
		goto set_offset;
	}

	/* Gather LVT offset for thresholding: */
	if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
		goto out;

	new = (smca_low & SMCA_THR_LVT_OFF) >> 12;

set_offset:
	offset = setup_APIC_mce_threshold(offset, new);
	if (offset == new)
		thresholding_irq_en = true;

done:
	mce_threshold_block_init(&b, offset);

out:
	return offset;
}

/*
 * Turn off MC4_MISC thresholding banks on all family 0x15 models since
 * they're not supported there.
 */
void disable_err_thresholding(struct cpuinfo_x86 *c)
{
	int i;
	u64 hwcr;
	bool need_toggle;
	u32 msrs[] = {
		0x00000413, /* MC4_MISC0 */
		0xc0000408, /* MC4_MISC1 */
	};

	if (c->x86 != 0x15)
		return;

	rdmsrl(MSR_K7_HWCR, hwcr);

	/* McStatusWrEn has to be set */
	need_toggle = !(hwcr & BIT(18));

	if (need_toggle)
		wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));

	/* Clear CntP bit safely */
	for (i = 0; i < ARRAY_SIZE(msrs); i++)
		msr_clear_bit(msrs[i], 62);

	/* restore old settings */
	if (need_toggle)
		wrmsrl(MSR_K7_HWCR, hwcr);
}

/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block, cpu = smp_processor_id();
	int offset = -1;

	disable_err_thresholding(c);

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (mce_flags.smca)
			smca_configure(bank, cpu);

		for (block = 0; block < NR_BLOCKS; ++block) {
			address = get_block_address(address, low, high, bank, block);
			if (!address)
				break;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI))
				continue;

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			offset = prepare_threshold_block(bank, block, address, offset, high);
		}
	}

	if (mce_flags.succor)
		deferred_error_interrupt_enable(c);
}

int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
{
	u64 dram_base_addr, dram_limit_addr, dram_hole_base;
	/* We start from the normalized address */
	u64 ret_addr = norm_addr;

	u32 tmp;

	u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
	u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
	u8 intlv_addr_sel, intlv_addr_bit;
	u8 num_intlv_bits, hashed_bit;
	u8 lgcy_mmio_hole_en, base = 0;
	u8 cs_mask, cs_id = 0;
	bool hash_enabled = false;

	/* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
	if (amd_df_indirect_read(nid, 0, 0x1B4, umc, &tmp))
		goto out_err;

	/* Remove HiAddrOffset from normalized address, if enabled: */
	if (tmp & BIT(0)) {
		u64 hi_addr_offset = (tmp & GENMASK_ULL(31, 20)) << 8;

		if (norm_addr >= hi_addr_offset) {
			ret_addr -= hi_addr_offset;
			base = 1;
		}
	}

	/* Read D18F0x110 (DramBaseAddress). */
	if (amd_df_indirect_read(nid, 0, 0x110 + (8 * base), umc, &tmp))
		goto out_err;

	/* Check if address range is valid. */
	if (!(tmp & BIT(0))) {
		pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
			__func__, tmp);
		goto out_err;
	}

	lgcy_mmio_hole_en = tmp & BIT(1);
	intlv_num_chan	  = (tmp >> 4) & 0xF;
	intlv_addr_sel	  = (tmp >> 8) & 0x7;
	dram_base_addr	  = (tmp & GENMASK_ULL(31, 12)) << 16;

	/* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
	if (intlv_addr_sel > 3) {
		pr_err("%s: Invalid interleave address select %d.\n",
			__func__, intlv_addr_sel);
		goto out_err;
	}

	/* Read D18F0x114 (DramLimitAddress). */
	if (amd_df_indirect_read(nid, 0, 0x114 + (8 * base), umc, &tmp))
		goto out_err;

	intlv_num_sockets = (tmp >> 8) & 0x1;
	intlv_num_dies	  = (tmp >> 10) & 0x3;
	dram_limit_addr	  = ((tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);

	intlv_addr_bit = intlv_addr_sel + 8;

	/* Re-use intlv_num_chan by setting it equal to log2(#channels) */
	switch (intlv_num_chan) {
	case 0:	intlv_num_chan = 0; break;
	case 1: intlv_num_chan = 1; break;
	case 3: intlv_num_chan = 2; break;
	case 5:	intlv_num_chan = 3; break;
	case 7:	intlv_num_chan = 4; break;

	case 8: intlv_num_chan = 1;
		hash_enabled = true;
		break;
	default:
		pr_err("%s: Invalid number of interleaved channels %d.\n",
			__func__, intlv_num_chan);
		goto out_err;
	}

	num_intlv_bits = intlv_num_chan;

	if (intlv_num_dies > 2) {
		pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
			__func__, intlv_num_dies);
		goto out_err;
	}

	num_intlv_bits += intlv_num_dies;

	/* Add a bit if sockets are interleaved. */
	num_intlv_bits += intlv_num_sockets;

	/* Assert num_intlv_bits <= 4 */
	if (num_intlv_bits > 4) {
		pr_err("%s: Invalid interleave bits %d.\n",
			__func__, num_intlv_bits);
		goto out_err;
	}

	if (num_intlv_bits > 0) {
		u64 temp_addr_x, temp_addr_i, temp_addr_y;
		u8 die_id_bit, sock_id_bit, cs_fabric_id;

		/*
		 * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
		 * This is the fabric id for this coherent slave. Use
		 * umc/channel# as instance id of the coherent slave
		 * for FICAA.
		 */
		if (amd_df_indirect_read(nid, 0, 0x50, umc, &tmp))
			goto out_err;

		cs_fabric_id = (tmp >> 8) & 0xFF;
		die_id_bit   = 0;

		/* If interleaved over more than 1 channel: */
		if (intlv_num_chan) {
			die_id_bit = intlv_num_chan;
			cs_mask	   = (1 << die_id_bit) - 1;
			cs_id	   = cs_fabric_id & cs_mask;
		}

		sock_id_bit = die_id_bit;

		/* Read D18F1x208 (SystemFabricIdMask). */
		if (intlv_num_dies || intlv_num_sockets)
			if (amd_df_indirect_read(nid, 1, 0x208, umc, &tmp))
				goto out_err;

		/* If interleaved over more than 1 die. */
		if (intlv_num_dies) {
			sock_id_bit  = die_id_bit + intlv_num_dies;
			die_id_shift = (tmp >> 24) & 0xF;
			die_id_mask  = (tmp >> 8) & 0xFF;

			cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
		}

		/* If interleaved over more than 1 socket. */
		if (intlv_num_sockets) {
			socket_id_shift	= (tmp >> 28) & 0xF;
			socket_id_mask	= (tmp >> 16) & 0xFF;

			cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
		}

		/*
		 * The pre-interleaved address consists of XXXXXXIIIYYYYY
		 * where III is the ID for this CS, and XXXXXXYYYYY are the
		 * address bits from the post-interleaved address.
		 * "num_intlv_bits" has been calculated to tell us how many "I"
		 * bits there are. "intlv_addr_bit" tells us how many "Y" bits
		 * there are (where "I" starts).
		 */
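		/*
		 * For example, with intlv_addr_bit == 8 (intlv_addr_sel 0),
		 * num_intlv_bits == 2 and cs_id == 2, a normalized address of
		 * 0x1234 expands as: Y = 0x34 (bits [7:0] kept), I = 2 << 8 =
		 * 0x200, X = 0x1200 << 2 = 0x4800, giving 0x4a34 before the
		 * DRAM base is added back.
		 */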
		temp_addr_y = ret_addr & GENMASK_ULL(intlv_addr_bit-1, 0);
		temp_addr_i = (cs_id << intlv_addr_bit);
		temp_addr_x = (ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
		ret_addr    = temp_addr_x | temp_addr_i | temp_addr_y;
	}

	/* Add dram base address */
	ret_addr += dram_base_addr;

	/* If legacy MMIO hole enabled */
	if (lgcy_mmio_hole_en) {
		if (amd_df_indirect_read(nid, 0, 0x104, umc, &tmp))
			goto out_err;

		dram_hole_base = tmp & GENMASK(31, 24);
		if (ret_addr >= dram_hole_base)
			ret_addr += (BIT_ULL(32) - dram_hole_base);
	}

	if (hash_enabled) {
		/* Save some parentheses and grab ls-bit at the end. */
		hashed_bit =	(ret_addr >> 12) ^
				(ret_addr >> 18) ^
				(ret_addr >> 21) ^
				(ret_addr >> 30) ^
				cs_id;

		hashed_bit &= BIT(0);

		if (hashed_bit != ((ret_addr >> intlv_addr_bit) & BIT(0)))
			ret_addr ^= BIT(intlv_addr_bit);
	}

	/* Is the calculated system address above the DRAM limit address? */
	if (ret_addr > dram_limit_addr)
		goto out_err;

	*sys_addr = ret_addr;
	return 0;

out_err:
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(umc_normaddr_to_sysaddr);
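
/*
 * Hypothetical usage sketch (the names m, node_id and channel are
 * illustrative; a caller such as an EDAC driver must supply the node and the
 * UMC/channel number that reported the error):
 *
 *	u64 sys_addr;
 *
 *	if (!umc_normaddr_to_sysaddr(m->addr, node_id, channel, &sys_addr))
 *		pr_info("normalized 0x%llx -> system 0x%llx\n",
 *			m->addr, sys_addr);
 */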

bool amd_mce_is_memory_error(struct mce *m)
{
	/* ErrCodeExt[20:16] */
	u8 xec = (m->status >> 16) & 0x1f;

	if (mce_flags.smca)
		return smca_get_bank_type(m->bank) == SMCA_UMC && xec == 0x0;

	return m->bank == 4 && xec == 0x8;
}

static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
{
	struct mce m;

	mce_setup(&m);

	m.status = status;
	m.misc   = misc;
	m.bank   = bank;
	m.tsc	 = rdtsc();

	if (m.status & MCI_STATUS_ADDRV) {
		m.addr = addr;

		/*
		 * Extract [55:<lsb>] where lsb is the least significant
		 * *valid* bit of the address bits.
		 */
		if (mce_flags.smca) {
			u8 lsb = (m.addr >> 56) & 0x3f;

			m.addr &= GENMASK_ULL(55, lsb);
		}
	}

	if (mce_flags.smca) {
		rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m.ipid);

		if (m.status & MCI_STATUS_SYNDV)
			rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m.synd);
	}

	mce_log(&m);
}

asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs)
{
	entering_irq();
	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
	inc_irq_stat(irq_deferred_error_count);
	deferred_error_int_vector();
	trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
	exiting_ack_irq();
}

/*
 * Returns true if the logged error is deferred, false otherwise.
 */
static inline bool
_log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc)
{
	u64 status, addr = 0;

	rdmsrl(msr_stat, status);
	if (!(status & MCI_STATUS_VAL))
		return false;

	if (status & MCI_STATUS_ADDRV)
		rdmsrl(msr_addr, addr);

	__log_error(bank, status, addr, misc);

	wrmsrl(msr_stat, 0);

	return status & MCI_STATUS_DEFERRED;
}

/*
 * We have three scenarios for checking for Deferred errors:
 *
 * 1) Non-SMCA systems check MCA_STATUS and log error if found.
 * 2) SMCA systems check MCA_STATUS. If error is found then log it and also
 *    clear MCA_DESTAT.
 * 3) SMCA systems check MCA_DESTAT, if error was not found in MCA_STATUS, and
 *    log it.
 */
static void log_error_deferred(unsigned int bank)
{
	bool defrd;

	defrd = _log_error_bank(bank, msr_ops.status(bank),
					msr_ops.addr(bank), 0);

	if (!mce_flags.smca)
		return;

	/* Clear MCA_DESTAT if we logged the deferred error from MCA_STATUS. */
	if (defrd) {
		wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0);
		return;
	}

	/*
	 * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check
	 * for a valid error.
	 */
	_log_error_bank(bank, MSR_AMD64_SMCA_MCx_DESTAT(bank),
			      MSR_AMD64_SMCA_MCx_DEADDR(bank), 0);
}

/* APIC interrupt handler for deferred errors */
static void amd_deferred_error_interrupt(void)
{
	unsigned int bank;

	for (bank = 0; bank < mca_cfg.banks; ++bank)
		log_error_deferred(bank);
}

static void log_error_thresholding(unsigned int bank, u64 misc)
{
	_log_error_bank(bank, msr_ops.status(bank), msr_ops.addr(bank), misc);
}

static void log_and_reset_block(struct threshold_block *block)
{
	struct thresh_restart tr;
	u32 low = 0, high = 0;

	if (!block)
		return;

	if (rdmsr_safe(block->address, &low, &high))
		return;

	if (!(high & MASK_OVERFLOW_HI))
		return;

	/* Log the MCE which caused the threshold event. */
	log_error_thresholding(block->bank, ((u64)high << 32) | low);

	/* Reset threshold block after logging error. */
	memset(&tr, 0, sizeof(tr));
	tr.b = block;
	threshold_restart_bank(&tr);
}

/*
 * Threshold interrupt handler will service THRESHOLD_APIC_VECTOR. The interrupt
 * goes off when error_count reaches threshold_limit.
 */
static void amd_threshold_interrupt(void)
{
	struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL;
	unsigned int bank, cpu = smp_processor_id();

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;

		first_block = per_cpu(threshold_banks, cpu)[bank]->blocks;
		if (!first_block)
			continue;

		/*
		 * The first block is also the head of the list. Check it first
		 * before iterating over the rest.
		 */
		log_and_reset_block(first_block);
		list_for_each_entry_safe(block, tmp, &first_block->miscj, miscj)
			log_and_reset_block(block);
	}
}

/*
 * Sysfs Interface
 */
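
/*
 * The kobjects built below surface each threshold block under the per-CPU
 * MCE device. For legacy bank 4 on CPU 0, for instance, this yields paths
 * like (interrupt_enable appears only if the block is interrupt capable):
 *
 *	/sys/devices/system/machinecheck/machinecheck0/northbridge/dram/
 *		{error_count,threshold_limit,interrupt_enable}
 *
 * Writing threshold_limit re-seeds the hardware counter via
 * threshold_restart_bank(); error_count reports the de-biased count.
 */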

struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long) b->name);		\
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (!b->interrupt_capable)
		return -EINVAL;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	b->interrupt_enable = !!new;

	memset(&tr, 0, sizeof(tr));
	tr.b		= b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	memset(&tr, 0, sizeof(tr));
	tr.old_limit = b->threshold_limit;
	b->threshold_limit = new;
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
	u32 lo, hi;

	rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);

	return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
				     (THRESHOLD_MAX - b->threshold_limit)));
}

static struct threshold_attr error_count = {
	.attr = {.name = __stringify(error_count), .mode = 0444 },
	.show = show_error_count,
};

#define RW_ATTR(val)							\
static struct threshold_attr val = {					\
	.attr	= {.name = __stringify(val), .mode = 0644 },		\
	.show	= show_## val,						\
	.store	= store_## val,						\
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);

static struct attribute *default_attrs[] = {
	&threshold_limit.attr,
	&error_count.attr,
	NULL,	/* possibly interrupt_enable if supported, see below */
	NULL,
};

#define to_block(k)	container_of(k, struct threshold_block, kobj)
#define to_attr(a)	container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;

	return ret;
}

static const struct sysfs_ops threshold_ops = {
	.show			= show,
	.store			= store,
};

static struct kobj_type threshold_ktype = {
	.sysfs_ops		= &threshold_ops,
	.default_attrs		= default_attrs,
};

static const char *get_name(unsigned int bank, struct threshold_block *b)
{
	enum smca_bank_types bank_type;

	if (!mce_flags.smca) {
		if (b && bank == 4)
			return bank4_names(b);

		return th_names[bank];
	}

	bank_type = smca_get_bank_type(bank);
	if (bank_type >= N_SMCA_BANK_TYPES)
		return NULL;

	if (b && bank_type == SMCA_UMC) {
		if (b->block < ARRAY_SIZE(smca_umc_block_names))
			return smca_umc_block_names[b->block];
		return NULL;
	}

	if (smca_banks[bank].hwid->count == 1)
		return smca_get_name(bank_type);

	snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN,
		 "%s_%x", smca_get_name(bank_type),
			  smca_banks[bank].sysfs_id);
	return buf_mcatype;
}

static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
				     unsigned int block, u32 address)
{
	struct threshold_block *b = NULL;
	u32 low, high;
	int err;

	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
		return 0;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;
	}

	if (!(high & MASK_CNTP_HI)  ||
	     (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block		= block;
	b->bank			= bank;
	b->cpu			= cpu;
	b->address		= address;
	b->interrupt_enable	= 0;
	b->interrupt_capable	= lvt_interrupt_supported(bank, high);
	b->threshold_limit	= THRESHOLD_MAX;

	if (b->interrupt_capable) {
		threshold_ktype.default_attrs[2] = &interrupt_enable.attr;
		b->interrupt_enable = 1;
	} else {
		threshold_ktype.default_attrs[2] = NULL;
	}

	INIT_LIST_HEAD(&b->miscj);

	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	} else {
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
	}

	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
				   per_cpu(threshold_banks, cpu)[bank]->kobj,
				   get_name(bank, b));
	if (err)
		goto out_free;
recurse:
	address = get_block_address(address, low, high, bank, ++block);
	if (!address)
		return 0;

	err = allocate_threshold_blocks(cpu, bank, block, address);
	if (err)
		goto out_free;

	if (b)
		kobject_uevent(&b->kobj, KOBJ_ADD);

	return err;

out_free:
	if (b) {
		kobject_put(&b->kobj);
		list_del(&b->miscj);
		kfree(b);
	}
	return err;
}

static int __threshold_add_blocks(struct threshold_bank *b)
{
	struct list_head *head = &b->blocks->miscj;
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	int err = 0;

	err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
	if (err)
		return err;

	list_for_each_entry_safe(pos, tmp, head, miscj) {

		err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
		if (err) {
			list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
				kobject_del(&pos->kobj);

			return err;
		}
	}
	return err;
}

static int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	struct device *dev = per_cpu(mce_device, cpu);
	struct amd_northbridge *nb = NULL;
	struct threshold_bank *b = NULL;
	const char *name = get_name(bank, NULL);
	int err = 0;

	if (!dev)
		return -ENODEV;

	if (is_shared_bank(bank)) {
		nb = node_to_amd_nb(amd_get_nb_id(cpu));

		/* threshold descriptor already initialized on this node? */
		if (nb && nb->bank4) {
			/* yes, use it */
			b = nb->bank4;
			err = kobject_add(b->kobj, &dev->kobj, name);
			if (err)
				goto out;

			per_cpu(threshold_banks, cpu)[bank] = b;
			refcount_inc(&b->cpus);

			err = __threshold_add_blocks(b);

			goto out;
		}
	}

	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}

	b->kobj = kobject_create_and_add(name, &dev->kobj);
	if (!b->kobj) {
		err = -EINVAL;
		goto out_free;
	}

	per_cpu(threshold_banks, cpu)[bank] = b;

	if (is_shared_bank(bank)) {
		refcount_set(&b->cpus, 1);

		/* nb is already initialized, see above */
		if (nb) {
			WARN_ON(nb->bank4);
			nb->bank4 = b;
		}
	}

	err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
	if (!err)
		goto out;

 out_free:
	kfree(b);

 out:
	return err;
}

static void deallocate_threshold_block(unsigned int cpu,
						 unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_put(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}

static void __threshold_remove_blocks(struct threshold_bank *b)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;

	kobject_del(b->kobj);

	list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
		kobject_del(&pos->kobj);
}

static void threshold_remove_bank(unsigned int cpu, int bank)
{
	struct amd_northbridge *nb;
	struct threshold_bank *b;

	b = per_cpu(threshold_banks, cpu)[bank];
	if (!b)
		return;

	if (!b->blocks)
		goto free_out;

	if (is_shared_bank(bank)) {
		if (!refcount_dec_and_test(&b->cpus)) {
			__threshold_remove_blocks(b);
			per_cpu(threshold_banks, cpu)[bank] = NULL;
			return;
		} else {
			/*
			 * the last CPU on this node using the shared bank is
			 * going away, remove that bank now.
			 */
			nb = node_to_amd_nb(amd_get_nb_id(cpu));
			nb->bank4 = NULL;
		}
	}

	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_del(b->kobj);
	kobject_put(b->kobj);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}

int mce_threshold_remove_device(unsigned int cpu)
{
	unsigned int bank;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		threshold_remove_bank(cpu, bank);
	}
	kfree(per_cpu(threshold_banks, cpu));
	per_cpu(threshold_banks, cpu) = NULL;
	return 0;
}

/* create dir/files for all valid threshold banks */
int mce_threshold_create_device(unsigned int cpu)
{
	unsigned int bank;
	struct threshold_bank **bp;
	int err = 0;

	bp = per_cpu(threshold_banks, cpu);
	if (bp)
		return 0;

	bp = kcalloc(mca_cfg.banks, sizeof(struct threshold_bank *),
		     GFP_KERNEL);
	if (!bp)
		return -ENOMEM;

	per_cpu(threshold_banks, cpu) = bp;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		err = threshold_create_bank(cpu, bank);
		if (err)
			goto err;
	}
	return err;
err:
	mce_threshold_remove_device(cpu);
	return err;
}

static __init int threshold_init_device(void)
{
	unsigned lcpu = 0;

	/* to hit CPUs online before the notifier is up */
	for_each_online_cpu(lcpu) {
		int err = mce_threshold_create_device(lcpu);

		if (err)
			return err;
	}

	if (thresholding_irq_en)
		mce_threshold_vector = amd_threshold_interrupt;

	return 0;
}
/*
 * There are three functions which need to be _initcalled in a logical
 * sequence:
 * 1. xen_late_init_mcelog
 * 2. mcheck_init_device
 * 3. threshold_init_device
 *
 * xen_late_init_mcelog must register xen_mce_chrdev_device before
 * native mce_chrdev_device registration if running under the xen platform;
 *
 * mcheck_init_device should be initialized before threshold_init_device to
 * initialize mce_device, otherwise a NULL ptr dereference will cause panic.
 *
 * So we use the following _initcalls:
 * 1. device_initcall(xen_late_init_mcelog);
 * 2. device_initcall_sync(mcheck_init_device);
 * 3. late_initcall(threshold_init_device);
 *
 * When running under xen, the initcall order is 1, 2, 3;
 * on baremetal, we skip 1 and do only 2 and 3.
 */
late_initcall(threshold_init_device);