// SPDX-License-Identifier: GPL-2.0-only
#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *pci_ctl;

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

static struct amd64_family_type *fam_type;

/* Per-node stuff */
static struct ecc_settings **ecc_stngs;

/* Device for the PCI component */
static struct device *pci_ctl_dev;

/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * or higher value'.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
static const struct scrubrate {
       u32 scrubval;           /* bit pattern for scrub rate */
       u32 bandwidth;          /* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x02, 800000000UL},
	{ 0x03, 400000000UL},
	{ 0x04, 200000000UL},
	{ 0x05, 100000000UL},
	{ 0x06, 50000000UL},
	{ 0x07, 25000000UL},
	{ 0x08, 12284069UL},
	{ 0x09, 6274509UL},
	{ 0x0A, 3121951UL},
	{ 0x0B, 1560975UL},
	{ 0x0C, 781440UL},
	{ 0x0D, 390720UL},
	{ 0x0E, 195300UL},
	{ 0x0F, 97650UL},
	{ 0x10, 48854UL},
	{ 0x11, 24427UL},
	{ 0x12, 12213UL},
	{ 0x13, 6101UL},
	{ 0x14, 3051UL},
	{ 0x15, 1523UL},
	{ 0x16, 761UL},
	{ 0x00, 0UL},        /* scrubbing off */
};

int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func)
{
	int err = 0;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err = 0;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= (pvt->model == 0x30) ? ~3 : ~1;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}

/*
 *
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only and no address offsets >= 0x100
 *
 * F10h: each DCT has its own set of regs
 *	DCT0 -> F2x040..
 *	DCT1 -> F2x140..
 *
 * F16h: has only 1 DCT
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
					 int offset, u32 *val)
{
	switch (pvt->fam) {
	case 0xf:
		if (dct || offset >= 0x100)
			return -EINVAL;
		break;

	case 0x10:
		if (dct) {
			/*
			 * Note: If ganging is enabled, barring the regs
			 * F2x[1,0]98 and F2x[1,0]9C, reads to F2x1xx
			 * return 0. (cf. Section 2.8.1 F10h BKDG)
			 */
			if (dct_ganging_enabled(pvt))
				return 0;

			offset += 0x100;
		}
		break;

	case 0x15:
		/*
		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
		 * We should select which DCT we access using F1x10C[DctCfgSel]
		 */
		dct = (dct && pvt->model == 0x30) ? 3 : dct;
		f15h_select_dct(pvt, dct);
		break;

	case 0x16:
		if (dct)
			return -EINVAL;
		break;

	default:
		break;
	}
	return amd64_read_pci_cfg(pvt->F2, offset, val);
}

/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */
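
/*
 * Example (illustrative, not from the BKDG): the set/get helpers below are
 * registered as mci->set_sdram_scrub_rate / mci->get_sdram_scrub_rate
 * elsewhere in this driver, so the EDAC core exposes them through sysfs.
 * Assuming this node is mc0:
 *
 *   # request ~400 MB/s of scrubbing, value in bytes/sec
 *   echo 400000000 > /sys/devices/system/edac/mc/mc0/sdram_scrub_rate
 *   cat /sys/devices/system/edac/mc/mc0/sdram_scrub_rate
 *
 * The value read back is the quantized rate actually programmed, taken from
 * scrubrates[].
 */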

static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
{
	/*
	 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
	 * are shifted down by 0x5, so scrubval 0x5 is written to the register
	 * as 0x0, scrubval 0x6 as 0x1, etc.
	 */
	if (scrubval >= 0x5 && scrubval <= 0x14) {
		scrubval -= 0x5;
		pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
	} else {
		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
	}
}
/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, use the last maximum value found.
 */
static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater or equal than the setting requested
	 * and program that. If at last entry, turn off DRAM scrubbing.
	 *
	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
	 * by falling back to the last element in scrubrates[].
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	scrubval = scrubrates[i].scrubval;

	if (pvt->umc) {
		__f17h_set_scrubval(pvt, scrubval);
	} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
		f15h_select_dct(pvt, 0);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
		f15h_select_dct(pvt, 1);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
	} else {
		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
	}

	if (scrubval)
		return scrubrates[i].bandwidth;

	return 0;
}
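
/*
 * Worked example of the selection above (arithmetic only): with
 * new_bw = 500000000 and min_rate = 0x5, the entries for scrubval 0x01..0x04
 * are skipped as disallowed, and the first remaining entry with
 * bandwidth <= new_bw is { 0x05, 100000000UL }.  Scrubval 0x5 is programmed
 * and 100000000 is returned, i.e. the request is clamped to the fastest rate
 * still permitted by min_rate.
 */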

static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (pvt->fam == 0xf)
		min_scrubrate = 0x0;

	if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			min_scrubrate = 0x6;
	}
	return __set_scrub_rate(pvt, bw, min_scrubrate);
}

static int get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int i, retval = -EINVAL;
	u32 scrubval = 0;

	if (pvt->umc) {
		amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
		if (scrubval & BIT(0)) {
			amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
			scrubval &= 0xF;
			scrubval += 0x5;
		} else {
			scrubval = 0;
		}
	} else if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
		else
			amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
	} else {
		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
	}

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}

/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}

/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	u8 node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers.  Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
		 (unsigned long)sys_addr);

	return NULL;
}
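
/*
 * Worked example (illustrative values): with two-node interleaving,
 * DRAM Base[IntlvEn] = 0x01 and the interleave selection bit is SysAddr[12].
 * A SysAddr of 0x12345678 has bit 12 set, so the loop above picks the node
 * whose DRAM Limit[IntlvSel] & 0x1 == 1, and that node is then sanity-checked
 * against its base/limit pair.
 */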

/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow];
		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
		addr_shift	= 4;

	/*
	 * F16h and F15h, models 30h and later need two addr_shift values:
	 * 8 for high and 6 for low (cf. F16h BKDG).
	 */
	} else if (pvt->fam == 0x16 ||
		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
		csbase          = pvt->csels[dct].csbases[csrow];
		csmask          = pvt->csels[dct].csmasks[csrow >> 1];

		*base  = (csbase & GENMASK_ULL(15,  5)) << 6;
		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;

		*mask = ~0ULL;
		/* poke holes for the csmask */
		*mask &= ~((GENMASK_ULL(15, 5)  << 6) |
			   (GENMASK_ULL(30, 19) << 8));

		*mask |= (csmask & GENMASK_ULL(15, 5))  << 6;
		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;

		return;
	} else {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
		addr_shift	= 8;

		if (pvt->fam == 0x15)
			base_bits = mask_bits =
				GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
		else
			base_bits = mask_bits =
				GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
	}

	*base  = (csbase & base_bits) << addr_shift;

	*mask  = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);
	/* OR them in */
	*mask |= (csmask & mask_bits) << addr_shift;
}

#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)

#define for_each_umc(i) \
	for (i = 0; i < fam_type->max_mcs; i++)

/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
				 (unsigned long)input_addr, csrow,
				 pvt->mc_node_id);

			return csrow;
		}
	}
	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		 (unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}

/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater.  In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
static int get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			      u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	/* only revE and later have the DRAM Hole Address Register */
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
			 pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
			 pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	*hole_base = dhar_base(pvt);
	*hole_size = (1ULL << 32) - *hole_base;

	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
					: k8_dhar_offset(pvt);

	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)*hole_base,
		 (unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
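
/*
 * Worked example (illustrative numbers): with DHAR base = 0xc0000000 the
 * DRAM hole covers [0xc0000000, 0xffffffff], so *hole_base = 0xc0000000 and
 * *hole_size = (1ULL << 32) - 0xc0000000 = 0x40000000 (1 GB).  The DRAM
 * hidden behind the hole is hoisted above 4 GB, and *hole_offset is what
 * sys_addr_to_dram_addr() subtracts from SysAddrs in that relocated range.
 */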

#ifdef CONFIG_EDAC_DEBUG
#define EDAC_DCT_ATTR_SHOW(reg)						\
static ssize_t reg##_show(struct device *dev,				\
			 struct device_attribute *mattr, char *data)	\
{									\
	struct mem_ctl_info *mci = to_mci(dev);				\
	struct amd64_pvt *pvt = mci->pvt_info;				\
									\
	return sprintf(data, "0x%016llx\n", (u64)pvt->reg);		\
}

EDAC_DCT_ATTR_SHOW(dhar);
EDAC_DCT_ATTR_SHOW(dbam0);
EDAC_DCT_ATTR_SHOW(top_mem);
EDAC_DCT_ATTR_SHOW(top_mem2);

static ssize_t dram_hole_show(struct device *dev, struct device_attribute *mattr,
			      char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	u64 hole_base = 0;
	u64 hole_offset = 0;
	u64 hole_size = 0;

	get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);

	return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
						 hole_size);
}

/*
 * update NUM_DBG_ATTRS in case you add new members
 */
static DEVICE_ATTR(dhar, S_IRUGO, dhar_show, NULL);
static DEVICE_ATTR(dbam, S_IRUGO, dbam0_show, NULL);
static DEVICE_ATTR(topmem, S_IRUGO, top_mem_show, NULL);
static DEVICE_ATTR(topmem2, S_IRUGO, top_mem2_show, NULL);
static DEVICE_ATTR_RO(dram_hole);

static struct attribute *dbg_attrs[] = {
	&dev_attr_dhar.attr,
	&dev_attr_dbam.attr,
	&dev_attr_topmem.attr,
	&dev_attr_topmem2.attr,
	&dev_attr_dram_hole.attr,
	NULL
};

static const struct attribute_group dbg_group = {
	.attrs = dbg_attrs,
};

static ssize_t inject_section_show(struct device *dev,
				   struct device_attribute *mattr, char *buf)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	return sprintf(buf, "0x%x\n", pvt->injection.section);
}

/*
 * store error injection section value which refers to one of 4 16-byte sections
 * within a 64-byte cacheline
 *
 * range: 0..3
 */
static ssize_t inject_section_store(struct device *dev,
				    struct device_attribute *mattr,
				    const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int ret;

	ret = kstrtoul(data, 10, &value);
	if (ret < 0)
		return ret;

	if (value > 3) {
		amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
		return -EINVAL;
	}

	pvt->injection.section = (u32) value;
	return count;
}

static ssize_t inject_word_show(struct device *dev,
				struct device_attribute *mattr, char *buf)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	return sprintf(buf, "0x%x\n", pvt->injection.word);
}

/*
 * store error injection word value which refers to one of 9 16-bit word of the
 * 16-byte (128-bit + ECC bits) section
 *
 * range: 0..8
 */
static ssize_t inject_word_store(struct device *dev,
				 struct device_attribute *mattr,
				 const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int ret;

	ret = kstrtoul(data, 10, &value);
	if (ret < 0)
		return ret;

	if (value > 8) {
		amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
		return -EINVAL;
	}

	pvt->injection.word = (u32) value;
	return count;
}

static ssize_t inject_ecc_vector_show(struct device *dev,
				      struct device_attribute *mattr,
				      char *buf)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
}

/*
 * store 16 bit error injection vector which enables injecting errors to the
 * corresponding bit within the error injection word above. When used during a
 * DRAM ECC read, it holds the contents of the DRAM ECC bits.
 */
static ssize_t inject_ecc_vector_store(struct device *dev,
				       struct device_attribute *mattr,
				       const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int ret;

	ret = kstrtoul(data, 16, &value);
	if (ret < 0)
		return ret;

	if (value & 0xFFFF0000) {
		amd64_warn("%s: invalid EccVector: 0x%lx\n", __func__, value);
		return -EINVAL;
	}

	pvt->injection.bit_map = (u32) value;
	return count;
}

/*
 * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
 * fields needed by the injection registers and read the NB Array Data Port.
 */
static ssize_t inject_read_store(struct device *dev,
				 struct device_attribute *mattr,
				 const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	u32 section, word_bits;
	int ret;

	ret = kstrtoul(data, 10, &value);
	if (ret < 0)
		return ret;

	/* Form value to choose 16-byte section of cacheline */
	section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);

	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);

	word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection);

	/* Issue 'word' and 'bit' along with the READ request */
	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);

	edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);

	return count;
}

/*
 * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
 * fields needed by the injection registers.
 */
static ssize_t inject_write_store(struct device *dev,
				  struct device_attribute *mattr,
				  const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 section, word_bits, tmp;
	unsigned long value;
	int ret;

	ret = kstrtoul(data, 10, &value);
	if (ret < 0)
		return ret;

	/* Form value to choose 16-byte section of cacheline */
	section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);

	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);

	word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection);

	pr_notice_once("Don't forget to decrease MCE polling interval in\n"
			"/sys/bus/machinecheck/devices/machinecheck<CPUNUM>/check_interval\n"
			"so that you can get the error report faster.\n");

	on_each_cpu(disable_caches, NULL, 1);

	/* Issue 'word' and 'bit' along with the READ request */
	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);

 retry:
	/* wait until injection happens */
	amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp);
	if (tmp & F10_NB_ARR_ECC_WR_REQ) {
		cpu_relax();
		goto retry;
	}

	on_each_cpu(enable_caches, NULL, 1);

	edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);

	return count;
}

/*
 * update NUM_INJ_ATTRS in case you add new members
 */

static DEVICE_ATTR_RW(inject_section);
static DEVICE_ATTR_RW(inject_word);
static DEVICE_ATTR_RW(inject_ecc_vector);
static DEVICE_ATTR_WO(inject_write);
static DEVICE_ATTR_WO(inject_read);

static struct attribute *inj_attrs[] = {
	&dev_attr_inject_section.attr,
	&dev_attr_inject_word.attr,
	&dev_attr_inject_ecc_vector.attr,
	&dev_attr_inject_write.attr,
	&dev_attr_inject_read.attr,
	NULL
};

static umode_t inj_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
	struct amd64_pvt *pvt = mci->pvt_info;

	/* Families which have that injection hw */
	if (pvt->fam >= 0x10 && pvt->fam <= 0x16)
		return attr->mode;

	return 0;
}

static const struct attribute_group inj_group = {
	.attrs = inj_attrs,
	.is_visible = inj_is_visible,
};
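
/*
 * Typical use from user space (sketch; paths assume this node is mc0 and
 * CONFIG_EDAC_DEBUG is enabled):
 *
 *   cd /sys/devices/system/edac/mc/mc0
 *   echo 2      > inject_section	# 16-byte section, 0..3
 *   echo 4      > inject_word		# 16-bit word, 0..8
 *   echo 0x0001 > inject_ecc_vector	# bits to corrupt in that word
 *   echo 1      > inject_write		# do the DRAM ECC write
 *   echo 1      > inject_read		# do the DRAM ECC read
 */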
#endif /* CONFIG_EDAC_DEBUG */

/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the Sysaddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ULL << 32)) &&
		    (sys_addr < ((1ULL << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
				 (unsigned long)sys_addr,
				 (unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;

	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)dram_addr);
	return dram_addr;
}
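
/*
 * Worked example (assumed register values): say node 0 has dram_base = 0 and
 * get_dram_hole_info() reports hole_size = 0x40000000 with
 * hole_offset = 0x40000000.  A SysAddr of 0x100001000 falls inside
 * [4 GB, 4 GB + hole_size), so the DHAR path returns
 * 0x100001000 - 0x40000000 = 0xc0001000.  A SysAddr of 0x80000000 misses
 * that window and simply becomes 0x80000000 - dram_base = 0x80000000.
 */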

/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}

/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
		      (dram_addr & 0xfff);

	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		 intlv_shift, (unsigned long)dram_addr,
		 (unsigned long)input_addr);

	return input_addr;
}
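
/*
 * Example (illustrative): dram_intlv_en(pvt, 0) == 0x3 means four-node
 * interleaving on bits [13:12], so intlv_shift = 2.  A DramAddr of
 * 0x12345678 then yields
 * ((0x12345678 >> 2) & GENMASK_ULL(35, 12)) + 0x678 = 0x48d1678.
 */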

/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    struct err_info *err)
{
	err->page = (u32) (error_address >> PAGE_SHIFT);
	err->offset = ((u32) error_address) & ~PAGE_MASK;
}

/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error.  mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}

static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
{
	u64 dram_base_addr, dram_limit_addr, dram_hole_base;
	/* We start from the normalized address */
	u64 ret_addr = norm_addr;

	u32 tmp;

	u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
	u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
	u8 intlv_addr_sel, intlv_addr_bit;
	u8 num_intlv_bits, hashed_bit;
	u8 lgcy_mmio_hole_en, base = 0;
	u8 cs_mask, cs_id = 0;
	bool hash_enabled = false;

	/* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
	if (amd_df_indirect_read(nid, 0, 0x1B4, umc, &tmp))
		goto out_err;

	/* Remove HiAddrOffset from normalized address, if enabled: */
	if (tmp & BIT(0)) {
		u64 hi_addr_offset = (tmp & GENMASK_ULL(31, 20)) << 8;

		if (norm_addr >= hi_addr_offset) {
			ret_addr -= hi_addr_offset;
			base = 1;
		}
	}

	/* Read D18F0x110 (DramBaseAddress). */
	if (amd_df_indirect_read(nid, 0, 0x110 + (8 * base), umc, &tmp))
		goto out_err;

	/* Check if address range is valid. */
	if (!(tmp & BIT(0))) {
		pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
			__func__, tmp);
		goto out_err;
	}

	lgcy_mmio_hole_en = tmp & BIT(1);
	intlv_num_chan	  = (tmp >> 4) & 0xF;
	intlv_addr_sel	  = (tmp >> 8) & 0x7;
	dram_base_addr	  = (tmp & GENMASK_ULL(31, 12)) << 16;

	/* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
	if (intlv_addr_sel > 3) {
		pr_err("%s: Invalid interleave address select %d.\n",
			__func__, intlv_addr_sel);
		goto out_err;
	}

	/* Read D18F0x114 (DramLimitAddress). */
	if (amd_df_indirect_read(nid, 0, 0x114 + (8 * base), umc, &tmp))
		goto out_err;

	intlv_num_sockets = (tmp >> 8) & 0x1;
	intlv_num_dies	  = (tmp >> 10) & 0x3;
	dram_limit_addr	  = ((tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);

	intlv_addr_bit = intlv_addr_sel + 8;

	/* Re-use intlv_num_chan by setting it equal to log2(#channels) */
	switch (intlv_num_chan) {
	case 0:	intlv_num_chan = 0; break;
	case 1: intlv_num_chan = 1; break;
	case 3: intlv_num_chan = 2; break;
	case 5:	intlv_num_chan = 3; break;
	case 7:	intlv_num_chan = 4; break;

	case 8: intlv_num_chan = 1;
		hash_enabled = true;
		break;
	default:
		pr_err("%s: Invalid number of interleaved channels %d.\n",
			__func__, intlv_num_chan);
		goto out_err;
	}

	num_intlv_bits = intlv_num_chan;

	if (intlv_num_dies > 2) {
		pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
			__func__, intlv_num_dies);
		goto out_err;
	}

	num_intlv_bits += intlv_num_dies;

	/* Add a bit if sockets are interleaved. */
	num_intlv_bits += intlv_num_sockets;

	/* Assert num_intlv_bits <= 4 */
	if (num_intlv_bits > 4) {
		pr_err("%s: Invalid interleave bits %d.\n",
			__func__, num_intlv_bits);
		goto out_err;
	}

	if (num_intlv_bits > 0) {
		u64 temp_addr_x, temp_addr_i, temp_addr_y;
		u8 die_id_bit, sock_id_bit, cs_fabric_id;

		/*
		 * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
		 * This is the fabric id for this coherent slave. Use
		 * umc/channel# as instance id of the coherent slave
		 * for FICAA.
		 */
		if (amd_df_indirect_read(nid, 0, 0x50, umc, &tmp))
			goto out_err;

		cs_fabric_id = (tmp >> 8) & 0xFF;
		die_id_bit   = 0;

		/* If interleaved over more than 1 channel: */
		if (intlv_num_chan) {
			die_id_bit = intlv_num_chan;
			cs_mask	   = (1 << die_id_bit) - 1;
			cs_id	   = cs_fabric_id & cs_mask;
		}

		sock_id_bit = die_id_bit;

		/* Read D18F1x208 (SystemFabricIdMask). */
		if (intlv_num_dies || intlv_num_sockets)
			if (amd_df_indirect_read(nid, 1, 0x208, umc, &tmp))
				goto out_err;

		/* If interleaved over more than 1 die. */
		if (intlv_num_dies) {
			sock_id_bit  = die_id_bit + intlv_num_dies;
			die_id_shift = (tmp >> 24) & 0xF;
			die_id_mask  = (tmp >> 8) & 0xFF;

			cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
		}

		/* If interleaved over more than 1 socket. */
		if (intlv_num_sockets) {
			socket_id_shift	= (tmp >> 28) & 0xF;
			socket_id_mask	= (tmp >> 16) & 0xFF;

			cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
		}

		/*
		 * The pre-interleaved address consists of XXXXXXIIIYYYYY
		 * where III is the ID for this CS, and XXXXXXYYYYY are the
		 * address bits from the post-interleaved address.
		 * "num_intlv_bits" has been calculated to tell us how many "I"
		 * bits there are. "intlv_addr_bit" tells us how many "Y" bits
		 * there are (where "I" starts).
		 */
		temp_addr_y = ret_addr & GENMASK_ULL(intlv_addr_bit-1, 0);
		temp_addr_i = (cs_id << intlv_addr_bit);
		temp_addr_x = (ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
		ret_addr    = temp_addr_x | temp_addr_i | temp_addr_y;
	}

	/* Add dram base address */
	ret_addr += dram_base_addr;

	/* If legacy MMIO hole enabled */
	if (lgcy_mmio_hole_en) {
		if (amd_df_indirect_read(nid, 0, 0x104, umc, &tmp))
			goto out_err;

		dram_hole_base = tmp & GENMASK(31, 24);
		if (ret_addr >= dram_hole_base)
			ret_addr += (BIT_ULL(32) - dram_hole_base);
	}

	if (hash_enabled) {
		/* Save some parentheses and grab ls-bit at the end. */
		hashed_bit =	(ret_addr >> 12) ^
				(ret_addr >> 18) ^
				(ret_addr >> 21) ^
				(ret_addr >> 30) ^
				cs_id;

		hashed_bit &= BIT(0);

		if (hashed_bit != ((ret_addr >> intlv_addr_bit) & BIT(0)))
			ret_addr ^= BIT(intlv_addr_bit);
	}

	/* Is the calculated system address above the DRAM limit address? */
	if (ret_addr > dram_limit_addr)
		goto out_err;

	*sys_addr = ret_addr;
	return 0;

out_err:
	return -EINVAL;
}
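
/*
 * Illustrative example of the de-interleaving step above (assumed values, no
 * DF reads): with intlv_addr_bit = 8, num_intlv_bits = 1 and cs_id = 1, a
 * post-interleave address of 0x1234 splits into Y = 0x34 (bits [7:0]),
 * I = cs_id << 8 = 0x100 and X = (0x1234 & ~0xffULL) << 1 = 0x2400, giving a
 * pre-interleave address of 0x2534 before the DRAM base, legacy MMIO hole and
 * optional hashing adjustments are applied.
 */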

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
{
	unsigned long edac_cap = EDAC_FLAG_NONE;
	u8 bit;

	if (pvt->umc) {
		u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;

		for_each_umc(i) {
			if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
				continue;

			umc_en_mask |= BIT(i);

			/* UMC Configuration bit 12 (DimmEccEn) */
			if (pvt->umc[i].umc_cfg & BIT(12))
				dimm_ecc_en_mask |= BIT(i);
		}

		if (umc_en_mask == dimm_ecc_en_mask)
			edac_cap = EDAC_FLAG_SECDED;
	} else {
		bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
			? 19
			: 17;

		if (pvt->dclr0 & BIT(bit))
			edac_cap = EDAC_FLAG_SECDED;
	}

	return edac_cap;
}

static void debug_display_dimm_sizes(struct amd64_pvt *, u8);

static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	if (pvt->dram_type == MEM_LRDDR3) {
		u32 dcsm = pvt->csels[chan].csmasks[0];
		/*
		 * It's assumed all LRDIMMs in a DCT are going to be of
		 * same 'type' until proven otherwise. So, use a cs
		 * value of '0' here to get dcsm value.
		 */
		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
	}

	edac_dbg(1, "All DIMMs support ECC:%s\n",
		    (dclr & BIT(19)) ? "yes" : "no");


	edac_dbg(1, "  PAR/ERR parity: %s\n",
		 (dclr & BIT(8)) ?  "enabled" : "disabled");

	if (pvt->fam == 0x10)
		edac_dbg(1, "  DCT 128bit mode width: %s\n",
			 (dclr & BIT(11)) ?  "128b" : "64b");

	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		 (dclr & BIT(12)) ?  "yes" : "no",
		 (dclr & BIT(13)) ?  "yes" : "no",
		 (dclr & BIT(14)) ?  "yes" : "no",
		 (dclr & BIT(15)) ?  "yes" : "no");
}

#define CS_EVEN_PRIMARY		BIT(0)
#define CS_ODD_PRIMARY		BIT(1)
#define CS_EVEN_SECONDARY	BIT(2)
#define CS_ODD_SECONDARY	BIT(3)
#define CS_3R_INTERLEAVE	BIT(4)

#define CS_EVEN			(CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
#define CS_ODD			(CS_ODD_PRIMARY | CS_ODD_SECONDARY)

static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
{
	u8 base, count = 0;
	int cs_mode = 0;

	if (csrow_enabled(2 * dimm, ctrl, pvt))
		cs_mode |= CS_EVEN_PRIMARY;

	if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
		cs_mode |= CS_ODD_PRIMARY;

	/* Asymmetric dual-rank DIMM support. */
	if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
		cs_mode |= CS_ODD_SECONDARY;

	/*
	 * 3 Rank interleaving support.
	 * There should be only three bases enabled and their two masks should
	 * be equal.
	 */
	for_each_chip_select(base, ctrl, pvt)
		count += csrow_enabled(base, ctrl, pvt);

	if (count == 3 &&
	    pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) {
		edac_dbg(1, "3R interleaving in use.\n");
		cs_mode |= CS_3R_INTERLEAVE;
	}

	return cs_mode;
}
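
/*
 * Example (derived from the checks above): a single dual-rank DIMM in slot 0
 * of a UMC enables chip selects 0 and 1, giving
 * cs_mode = CS_EVEN_PRIMARY | CS_ODD_PRIMARY.  If the second rank is
 * advertised only via the secondary base/mask pair (asymmetric DIMM),
 * CS_ODD_SECONDARY is set instead of CS_ODD_PRIMARY.
 */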

static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
{
	int dimm, size0, size1, cs0, cs1, cs_mode;

	edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);

	for (dimm = 0; dimm < 2; dimm++) {
		cs0 = dimm * 2;
		cs1 = dimm * 2 + 1;

		cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);

		size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
		size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
				cs0,	size0,
				cs1,	size1);
	}
}

static void __dump_misc_regs_df(struct amd64_pvt *pvt)
{
	struct amd64_umc *umc;
	u32 i, tmp, umc_base;

	for_each_umc(i) {
		umc_base = get_umc_base(i);
		umc = &pvt->umc[i];

		edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
		edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
		edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
		edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);

		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
		edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);

		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
		edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
		edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);

		edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
				i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
				    (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
		edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
				i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
		edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
				i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
		edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
				i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");

		if (pvt->dram_type == MEM_LRDDR4) {
			amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
			edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
					i, 1 << ((tmp >> 4) & 0x3));
		}

		debug_display_dimm_sizes_df(pvt, i);
	}

	edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt));
}

/* Display and decode various NB registers for debug purposes. */
static void __dump_misc_regs(struct amd64_pvt *pvt)
{
	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);

	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt),
		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
				   : f10_dhar_offset(pvt));

	debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (pvt->fam == 0xf)
		return;

	debug_display_dimm_sizes(pvt, 1);

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}

/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
	if (pvt->umc)
		__dump_misc_regs_df(pvt);
	else
		__dump_misc_regs(pvt);

	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
}

/*
 * See BKDG, F2x[1,0][5C:40], F2x[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
	} else if (pvt->fam >= 0x17) {
		int umc;

		for_each_umc(umc) {
			pvt->csels[umc].b_cnt = 4;
			pvt->csels[umc].m_cnt = 2;
		}

	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
}

static void read_umc_base_mask(struct amd64_pvt *pvt)
{
	u32 umc_base_reg, umc_base_reg_sec;
	u32 umc_mask_reg, umc_mask_reg_sec;
	u32 base_reg, base_reg_sec;
	u32 mask_reg, mask_reg_sec;
	u32 *base, *base_sec;
	u32 *mask, *mask_sec;
	int cs, umc;

	for_each_umc(umc) {
		umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
		umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;

		for_each_chip_select(cs, umc, pvt) {
			base = &pvt->csels[umc].csbases[cs];
			base_sec = &pvt->csels[umc].csbases_sec[cs];

			base_reg = umc_base_reg + (cs * 4);
			base_reg_sec = umc_base_reg_sec + (cs * 4);

			if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
				edac_dbg(0, "  DCSB%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *base, base_reg);

			if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
				edac_dbg(0, "    DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *base_sec, base_reg_sec);
		}

		umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
		umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;

		for_each_chip_select_mask(cs, umc, pvt) {
			mask = &pvt->csels[umc].csmasks[cs];
			mask_sec = &pvt->csels[umc].csmasks_sec[cs];

			mask_reg = umc_mask_reg + (cs * 4);
			mask_reg_sec = umc_mask_reg_sec + (cs * 4);

			if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
				edac_dbg(0, "  DCSM%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *mask, mask_reg);

			if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
				edac_dbg(0, "    DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *mask_sec, mask_reg_sec);
		}
	}
}

/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs;

	prep_chip_selects(pvt);

	if (pvt->umc)
		return read_umc_base_mask(pvt);

	for_each_chip_select(cs, 0, pvt) {
		int reg0   = DCSB0 + (cs * 4);
		int reg1   = DCSB1 + (cs * 4);
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
				 cs, *base0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
				 cs, *base1, (pvt->fam == 0x10) ? reg1
							: reg0);
	}

	for_each_chip_select_mask(cs, 0, pvt) {
		int reg0   = DCSM0 + (cs * 4);
		int reg1   = DCSM1 + (cs * 4);
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask1, (pvt->fam == 0x10) ? reg1
							: reg0);
	}
}

static void determine_memory_type(struct amd64_pvt *pvt)
{
	u32 dram_ctrl, dcsm;

	if (pvt->umc) {
		if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
			pvt->dram_type = MEM_LRDDR4;
		else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
			pvt->dram_type = MEM_RDDR4;
		else
			pvt->dram_type = MEM_DDR4;
		return;
	}

	switch (pvt->fam) {
	case 0xf:
		if (pvt->ext_model >= K8_REV_F)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
		return;

	case 0x10:
		if (pvt->dchr0 & DDR3_MODE)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
		return;

	case 0x15:
		if (pvt->model < 0x60)
			goto ddr3;

		/*
		 * Model 0x60h needs special handling:
		 *
		 * We use a Chip Select value of '0' to obtain dcsm.
		 * Theoretically, it is possible to populate LRDIMMs of different
		 * 'Rank' value on a DCT. But this is not the common case. So,
		 * it's reasonable to assume all DIMMs are going to be of same
		 * 'type' until proven otherwise.
		 */
		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
		dcsm = pvt->csels[0].csmasks[0];

		if (((dram_ctrl >> 8) & 0x7) == 0x2)
			pvt->dram_type = MEM_DDR4;
		else if (pvt->dclr0 & BIT(16))
			pvt->dram_type = MEM_DDR3;
		else if (dcsm & 0x3)
			pvt->dram_type = MEM_LRDDR3;
		else
			pvt->dram_type = MEM_RDDR3;

		return;

	case 0x16:
		goto ddr3;

	default:
		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
		pvt->dram_type = MEM_EMPTY;
	}
	return;

ddr3:
	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
}

/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	/* not used */
	pvt->dclr1 = 0;

	return (flag) ? 2 : 1;
}

/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
	u16 mce_nid = topology_die_id(m->extcpu);
	struct mem_ctl_info *mci;
	u8 start_bit = 1;
	u8 end_bit   = 47;
	u64 addr;

	mci = edac_mc_find(mce_nid);
	if (!mci)
		return 0;

	pvt = mci->pvt_info;

	if (pvt->fam == 0xf) {
		start_bit = 3;
		end_bit   = 39;
	}

	addr = m->addr & GENMASK_ULL(end_bit, start_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (pvt->fam == 0x15) {
		u64 cc6_base, tmp_addr;
		u32 tmp;
		u8 intlv_en;

		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
			return addr;


		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
		intlv_en = tmp >> 21 & 0x7;

		/* add [47:27] + 3 trailing bits */
		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;

		/* reverse and add DramIntlvEn */
		cc6_base |= intlv_en ^ 0x7;

		/* pin at [47:24] */
		cc6_base <<= 24;

		if (!intlv_en)
			return cc6_base | (addr & GENMASK_ULL(23, 0));

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);

							/* faster log2 */
		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);

		/* OR DramIntlvSel into bits [14:12] */
		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;

		/* add remaining [11:0] bits from original MC4_ADDR */
		tmp_addr |= addr & GENMASK_ULL(11, 0);

		return cc6_base | tmp_addr;
	}

	return addr;
}

static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(vendor, device, dev))) {
		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
		    (dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
	}

	return dev;
}

static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
	struct amd_northbridge *nb;
	struct pci_dev *f1 = NULL;
	unsigned int pci_func;
	int off = range << 3;
	u32 llim;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (pvt->fam == 0xf)
		return;

	if (!dram_rw(pvt, range))
		return;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

	/* F15h: factor in CC6 save area by reading dst node's limit reg */
	if (pvt->fam != 0x15)
		return;

	nb = node_to_amd_nb(dram_dst_node(pvt, range));
	if (WARN_ON(!nb))
		return;

	if (pvt->model == 0x60)
		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
	else if (pvt->model == 0x30)
		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
	else
		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;

	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
	if (WARN_ON(!f1))
		return;

	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);

				    /* {[39:27],111b} */
	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);

				    /* [47:40] */
	pvt->ranges[range].lim.hi |= llim >> 13;

	pci_dev_put(f1);
}

static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!err->src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		err->err_code = ERR_NODE;
		return;
	}

	/* Now map the sys_addr to a CSROW */
	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/* CHIPKILL enabled */
	if (pvt->nbcfg & NBCFG_CHIPKILL) {
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
		if (err->channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
				      "possible error reporting race\n",
				      err->syndrome);
			err->err_code = ERR_CHANNEL;
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory.  This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		err->channel = ((sys_addr & BIT(3)) != 0);
	}
}

static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}

static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	if (pvt->ext_model >= K8_REV_F) {
		WARN_ON(cs_mode > 11);
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
	}
	else if (pvt->ext_model >= K8_REV_D) {
		unsigned diff;
		WARN_ON(cs_mode > 10);

		/*
		 * the below calculation, besides trying to win an obfuscated C
		 * contest, maps cs_mode values to DIMM chip select sizes. The
		 * mappings are:
		 *
		 * cs_mode	CS size (mb)
		 * =======	============
		 * 0		32
		 * 1		64
		 * 2		128
		 * 3		128
		 * 4		256
		 * 5		512
		 * 6		256
		 * 7		512
		 * 8		1024
		 * 9		1024
		 * 10		2048
		 *
		 * Basically, it calculates a value with which to shift the
		 * smallest CS size of 32MB.
		 *
		 * ddr[23]_cs_size have a similar purpose.
		 */
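		/*
		 * Worked example (for illustration): cs_mode == 7 gives
		 * diff = 7/3 + 1 = 3, i.e. 32 << (7 - 3) = 512 MB, and
		 * cs_mode == 10 gives diff = 4, i.e. 32 << 6 = 2048 MB,
		 * matching the table above.
		 */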
		diff = cs_mode/3 + (unsigned)(cs_mode > 5);

		return 32 << (cs_mode - diff);
	}
	else {
		WARN_ON(cs_mode > 6);
		return 32 << cs_mode;
	}
}

/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
	int i, j, channels = 0;

	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
		return 2;

	/*
	 * Need to check if in unganged mode: in that case, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 * bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < 2; i++) {
		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels++;
				break;
			}
		}
	}

	if (channels > 2)
		channels = 2;

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}

static int f17_early_channel_count(struct amd64_pvt *pvt)
{
	int i, channels = 0;

	/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
	for_each_umc(i)
		channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}

static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}

static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i < 4 || i == 6)
		cs_size = -1;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = rank_multiply * (128 << shift);

	return cs_size;
}

static int ddr4_cs_size(unsigned i)
{
	int cs_size = 0;

	if (i == 0)
		cs_size = -1;
	else if (i == 1)
		cs_size = 1024;
	else
		/* Min cs_size = 1G */
		cs_size = 1024 * (1 << (i >> 1));

	return cs_size;
}

static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}

/*
 * F15h supports only 64bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	return ddr3_cs_size(cs_mode, false);
}

/* F15h M60h supports DDR4 mapping as well.. */
static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
					unsigned cs_mode, int cs_mask_nr)
{
	int cs_size;
	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];

	WARN_ON(cs_mode > 12);

	if (pvt->dram_type == MEM_DDR4) {
		if (cs_mode > 9)
			return -1;

		cs_size = ddr4_cs_size(cs_mode);
	} else if (pvt->dram_type == MEM_LRDDR3) {
		unsigned rank_multiply = dcsm & 0xf;

		if (rank_multiply == 3)
			rank_multiply = 4;
		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
	} else {
		/* Minimum cs size is 512 MB for F15h M60h */
		if (cs_mode == 0x1)
			return -1;

		cs_size = ddr3_cs_size(cs_mode, false);
	}

	return cs_size;
}

/*
 * F16h and F15h model 30h have only limited cs_modes.
 */
static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	if (cs_mode == 6 || cs_mode == 8 ||
	    cs_mode == 9 || cs_mode == 12)
		return -1;
	else
		return ddr3_cs_size(cs_mode, false);
}

static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
				    unsigned int cs_mode, int csrow_nr)
{
	u32 addr_mask_orig, addr_mask_deinterleaved;
	u32 msb, weight, num_zero_bits;
	int dimm, size = 0;

	/* No Chip Selects are enabled. */
	if (!cs_mode)
		return size;

	/* Requested size of an even CS but none are enabled. */
	if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
		return size;

	/* Requested size of an odd CS but none are enabled. */
	if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
		return size;

	/*
	 * There is one mask per DIMM, and two Chip Selects per DIMM.
	 *	CS0 and CS1 -> DIMM0
	 *	CS2 and CS3 -> DIMM1
	 */
	dimm = csrow_nr >> 1;

	/* Asymmetric dual-rank DIMM support. */
	if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
		addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
	else
		addr_mask_orig = pvt->csels[umc].csmasks[dimm];

	/*
	 * The number of zero bits in the mask is equal to the number of bits
	 * in a full mask minus the number of bits in the current mask.
	 *
	 * The MSB is the number of bits in the full mask because BIT[0] is
	 * always 0.
	 *
	 * In the special 3 Rank interleaving case, a single bit is flipped
	 * without swapping with the most significant bit. This can be handled
	 * by keeping the MSB where it is and ignoring the single zero bit.
	 */
	msb = fls(addr_mask_orig) - 1;
	weight = hweight_long(addr_mask_orig);
	num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);

	/* Take the number of zero bits off from the top of the mask. */
	addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);

	edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
	edac_dbg(1, "  Original AddrMask: 0x%x\n", addr_mask_orig);
	edac_dbg(1, "  Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);

	/* Register [31:1] = Address [39:9]. Size is in kBs here. */
	size = (addr_mask_deinterleaved >> 2) + 1;
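	/*
	 * Hypothetical example: a deinterleaved mask with bits [23:1] set
	 * (0xfffffe) gives (0xfffffe >> 2) + 1 = 0x400000 kB, i.e. 4096 MB
	 * after the shift below.
	 */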

	/* Return size in MBs. */
	return size >> 10;
}

static void read_dram_ctl_register(struct amd64_pvt *pvt)
{

	if (pvt->fam == 0xf)
		return;

	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		edac_dbg(0, "  DCTs operate in %s mode\n",
			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			edac_dbg(0, "  Address range split per DCT: %s\n",
				 (dct_high_range_enabled(pvt) ? "yes" : "no"));

		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			 (dct_memory_cleared(pvt) ? "yes" : "no"));

		edac_dbg(0, "  channel interleave: %s, "
			 "interleave bits selector: 0x%x\n",
			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			 dct_sel_interleave_addr(pvt));
	}

	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
}

/*
 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
 * 2.10.12 Memory Interleaving Modes).
 */
static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				     u8 intlv_en, int num_dcts_intlv,
				     u32 dct_sel)
{
	u8 channel = 0;
	u8 select;

	if (!(intlv_en))
		return (u8)(dct_sel);

	if (num_dcts_intlv == 2) {
		select = (sys_addr >> 8) & 0x3;
		channel = select ? 0x3 : 0;
	} else if (num_dcts_intlv == 4) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);
		switch (intlv_addr) {
		case 0x4:
			channel = (sys_addr >> 8) & 0x3;
			break;
		case 0x5:
			channel = (sys_addr >> 9) & 0x3;
			break;
		}
	}
	return channel;
}

/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
{
	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		return 0;

	if (hi_range_sel)
		return dct_sel_high;

	/*
	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
	 */
	if (dct_interleave_enabled(pvt)) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		/* return DCT select function: 0=DCT0, 1=DCT1 */
		if (!intlv_addr)
			return sys_addr >> 6 & 1;

		if (intlv_addr & 0x2) {
			u8 shift = intlv_addr & 0x1 ? 9 : 6;
			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;

			return ((sys_addr >> shift) & 1) ^ temp;
		}

		if (intlv_addr & 0x4) {
			u8 shift = intlv_addr & 0x1 ? 9 : 8;

			return (sys_addr >> shift) & 1;
		}

		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
	}

	if (dct_high_range_enabled(pvt))
		return ~dct_sel_high & 1;

	return 0;
}

/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
				 u64 sys_addr, bool hi_rng,
				 u32 dct_sel_base_addr)
{
	u64 chan_off;
	u64 dram_base		= get_dram_base(pvt, range);
	u64 hole_off		= f10_dhar_offset(pvt);
	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;

	if (hi_rng) {
		/*
		 * if
		 * base address of high range is below 4Gb
		 * (bits [47:27] at [31:11])
		 * DRAM address space on this DCT is hoisted above 4Gb	&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole offset from sys_addr
		 * else
		 *	remove high range offset from sys_addr
		 */
		if ((!(dct_sel_base_addr >> 16) ||
		     dct_sel_base_addr < dhar_base(pvt)) &&
		    dhar_valid(pvt) &&
		    (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dct_sel_base_off;
	} else {
		/*
		 * if
		 * we have a valid hole		&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole
		 * else
		 *	remove dram base to normalize to DCT address
		 */
		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dram_base;
	}

	return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
}

/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * spare row
 */
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
{
	int tmp_cs;

	if (online_spare_swap_done(pvt, dct) &&
	    csrow == online_spare_bad_dramcs(pvt, dct)) {

		for_each_chip_select(tmp_cs, dct, pvt) {
			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
				csrow = tmp_cs;
				break;
			}
		}
	}
	return csrow;
}

/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u64 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = edac_mc_find(nid);
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);

	for_each_chip_select(csrow, dct, pvt) {
		if (!csrow_enabled(csrow, dct, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);

		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
			 csrow, cs_base, cs_mask);

		cs_mask = ~cs_mask;

		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
			 (in_addr & cs_mask), (cs_base & cs_mask));

		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
				cs_found =  csrow;
				break;
			}
			cs_found = f10_process_possible_spare(pvt, dct, csrow);

			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}

/*
 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
 * swapped with a region located at the bottom of memory so that the GPU can use
 * the interleaved region and thus two channels.
 */
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;

	if (pvt->fam == 0x10) {
		/* only revC3 and revE have that feature */
		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
			return sys_addr;
	}

	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
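	/*
	 * Note: the comparison below works on 128 MB granularity - bits
	 * [33:27] of sys_addr are checked against the swap base/limit
	 * fields, and a matching address is remapped by XOR-ing in
	 * swap_base at bit 27.
	 */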

	if (!(swap_reg & 0x1))
		return sys_addr;

	swap_base	= (swap_reg >> 3) & 0x7f;
	swap_limit	= (swap_reg >> 11) & 0x7f;
	rgn_size	= (swap_reg >> 20) & 0x7f;
	tmp_addr	= sys_addr >> 27;

	if (!(sys_addr >> 34) &&
	    (((tmp_addr >= swap_base) &&
	     (tmp_addr <= swap_limit)) ||
	     (tmp_addr < rgn_size)))
		return sys_addr ^ (u64)swap_base << 27;

	return sys_addr;
}

/* For a given @dram_range, check if @sys_addr falls within it. */
static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
				  u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	u64 chan_addr;
	u32 dct_sel_base;
	u8 channel;
	bool high_range = false;

	u8 node_id    = dram_dst_node(pvt, range);
	u8 intlv_en   = dram_intlv_en(pvt, range);
	u32 intlv_sel = dram_intlv_sel(pvt, range);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			    sys_addr);
		return -EINVAL;
	}

	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
		return -EINVAL;

	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);

	dct_sel_base = dct_sel_baseaddr(pvt);

	/*
	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
	 * select between DCT0 and DCT1.
	 */
	if (dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt) &&
	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
		high_range = true;

	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);

	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
					  high_range, dct_sel_base);

	/* Remove node interleaving, see F1x120 */
	if (intlv_en)
		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
			    (chan_addr & 0xfff);

	/* remove channel interleave */
	if (dct_interleave_enabled(pvt) &&
	   !dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt)) {

		if (dct_sel_interleave_addr(pvt) != 1) {
			if (dct_sel_interleave_addr(pvt) == 0x3)
				/* hash 9 */
				chan_addr = ((chan_addr >> 10) << 9) |
					     (chan_addr & 0x1ff);
			else
				/* A[6] or hash 6 */
				chan_addr = ((chan_addr >> 7) << 6) |
					     (chan_addr & 0x3f);
		} else
			/* A[12] */
			chan_addr = ((chan_addr >> 13) << 12) |
				     (chan_addr & 0xfff);
	}

	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);

	if (cs_found >= 0)
		*chan_sel = channel;

	return cs_found;
}

static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
					u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	int num_dcts_intlv = 0;
	u64 chan_addr, chan_offset;
	u64 dct_base, dct_limit;
	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;

	u64 dhar_offset		= f10_dhar_offset(pvt);
	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
	u8 node_id		= dram_dst_node(pvt, range);
	u8 intlv_en		= dram_intlv_en(pvt, range);

	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);

	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (!(get_dram_base(pvt, range)  <= sys_addr) &&
	    !(get_dram_limit(pvt, range) >= sys_addr))
		return -EINVAL;

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			    sys_addr);
		return -EINVAL;
	}

	/* Verify sys_addr is within DCT Range. */
	dct_base = (u64) dct_sel_baseaddr(pvt);
	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;

	if (!(dct_cont_base_reg & BIT(0)) &&
	    !(dct_base <= (sys_addr >> 27) &&
	      dct_limit >= (sys_addr >> 27)))
		return -EINVAL;

	/* Verify number of dct's that participate in channel interleaving. */
	num_dcts_intlv = (int) hweight8(intlv_en);

	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
		return -EINVAL;

	if (pvt->model >= 0x60)
		channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
	else
		channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
						     num_dcts_intlv, dct_sel);

	/* Verify we stay within the MAX number of channels allowed */
	if (channel > 3)
		return -EINVAL;

	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));

	/* Get normalized DCT addr */
	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
		chan_offset = dhar_offset;
	else
		chan_offset = dct_base << 27;

	chan_addr = sys_addr - chan_offset;

	/* remove channel interleave */
	if (num_dcts_intlv == 2) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 9) << 8) |
						(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 10) << 9) |
						(chan_addr & 0x1ff);
		else
			return -EINVAL;

	} else if (num_dcts_intlv == 4) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 10) << 8) |
							(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 11) << 9) |
							(chan_addr & 0x1ff);
		else
			return -EINVAL;
	}

	if (dct_offset_en) {
		amd64_read_pci_cfg(pvt->F1,
				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
				   &tmp);
		chan_addr +=  (u64) ((tmp >> 11) & 0xfff) << 27;
	}

	f15h_select_dct(pvt, channel);

	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);

	/*
	 * Find Chip select:
	 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
	 * there is support for 4 DCT's, but only 2 are currently functional.
	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
	 * pvt->csels[1]. So we need to use '1' here to get correct info.
	 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
	 */
	alias_channel =  (channel == 3) ? 1 : channel;

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);

	if (cs_found >= 0)
		*chan_sel = alias_channel;

	return cs_found;
}

static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
					u64 sys_addr,
					int *chan_sel)
{
	int cs_found = -EINVAL;
	unsigned range;

	for (range = 0; range < DRAM_RANGES; range++) {
		if (!dram_rw(pvt, range))
			continue;

		if (pvt->fam == 0x15 && pvt->model >= 0x30)
			cs_found = f15_m30h_match_to_this_node(pvt, range,
							       sys_addr,
							       chan_sel);

		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
			 (get_dram_limit(pvt, range) >= sys_addr)) {
			cs_found = f1x_match_to_this_node(pvt, range,
							  sys_addr, chan_sel);
			if (cs_found >= 0)
				break;
		}
	}
	return cs_found;
}

/*
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware
 * (MCX_ADDR).
 */
static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				     struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/*
	 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise @chan should already contain the channel at
	 * this point.
	 */
	if (dct_ganging_enabled(pvt))
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
}

/*
 * debug routine to display the memory sizes of all logical DIMMs and their
 * CSROWs
 */
static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
	int dimm, size0, size1;
	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;

	if (pvt->fam == 0xf) {
		/* K8 families < revF not supported yet */
	       if (pvt->ext_model < K8_REV_F)
			return;
	       else
		       WARN_ON(ctrl != 0);
	}

	if (pvt->fam == 0x10) {
		dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
							   : pvt->dbam0;
		dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
				 pvt->csels[1].csbases :
				 pvt->csels[0].csbases;
	} else if (ctrl) {
		dbam = pvt->dbam0;
		dcsb = pvt->csels[1].csbases;
	}
	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
		 ctrl, dbam);

	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

	/* Dump memory sizes for DIMM and its CSROWs */
	for (dimm = 0; dimm < 4; dimm++) {

		size0 = 0;
		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
			/*
			 * For F15m60h, we need multiplier for LRDIMM cs_size
			 * calculation. We pass dimm value to the dbam_to_cs
			 * mapper so we can find the multiplier from the
			 * corresponding DCSM.
			 */
			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam),
						     dimm);

		size1 = 0;
		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam),
						     dimm);

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
				dimm * 2,     size0,
				dimm * 2 + 1, size1);
	}
}

static struct amd64_family_type family_types[] = {
	[K8_CPUS] = {
		.ctl_name = "K8",
		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
		.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= k8_early_channel_count,
			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
			.dbam_to_cs		= k8_dbam_to_chip_select,
		}
	},
	[F10_CPUS] = {
		.ctl_name = "F10h",
		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
		.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f10_dbam_to_chip_select,
		}
	},
	[F15_CPUS] = {
		.ctl_name = "F15h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f15_dbam_to_chip_select,
		}
	},
	[F15_M30H_CPUS] = {
		.ctl_name = "F15h_M30h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F15_M60H_CPUS] = {
		.ctl_name = "F15h_M60h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f15_m60h_dbam_to_chip_select,
		}
	},
	[F16_CPUS] = {
		.ctl_name = "F16h",
		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F16_M30H_CPUS] = {
		.ctl_name = "F16h_M30h",
		.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F17_CPUS] = {
		.ctl_name = "F17h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_addr_mask_to_cs_size,
		}
	},
	[F17_M10H_CPUS] = {
		.ctl_name = "F17h_M10h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_addr_mask_to_cs_size,
		}
	},
	[F17_M30H_CPUS] = {
		.ctl_name = "F17h_M30h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
		.max_mcs = 8,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_addr_mask_to_cs_size,
		}
	},
	[F17_M60H_CPUS] = {
		.ctl_name = "F17h_M60h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_addr_mask_to_cs_size,
		}
	},
	[F17_M70H_CPUS] = {
		.ctl_name = "F17h_M70h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_addr_mask_to_cs_size,
		}
	},
	[F19_CPUS] = {
		.ctl_name = "F19h",
		.f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
		.max_mcs = 8,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_addr_mask_to_cs_size,
		}
	},
};

/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
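/*
 * Layout note: each run of ecc_sym_sz consecutive entries (4 for x4 symbols,
 * 8 for x8) is the eigenvector set for one error symbol; decode_syndrome()
 * below walks these groups and tries to cancel the syndrome by XOR.
 */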
static const u16 x4_vectors[] = {
	0x2f57, 0x1afe, 0x66cc, 0xdd88,
	0x11eb, 0x3396, 0x7f4c, 0xeac8,
	0x0001, 0x0002, 0x0004, 0x0008,
	0x1013, 0x3032, 0x4044, 0x8088,
	0x106b, 0x30d6, 0x70fc, 0xe0a8,
	0x4857, 0xc4fe, 0x13cc, 0x3288,
	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
	0x15c1, 0x2a42, 0x89ac, 0x4758,
	0x2b03, 0x1602, 0x4f0c, 0xca08,
	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
	0x8ba7, 0x465e, 0x244c, 0x1cc8,
	0x2b87, 0x164e, 0x642c, 0xdc18,
	0x40b9, 0x80de, 0x1094, 0x20e8,
	0x27db, 0x1eb6, 0x9dac, 0x7b58,
	0x11c1, 0x2242, 0x84ac, 0x4c58,
	0x1be5, 0x2d7a, 0x5e34, 0xa718,
	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
	0x4c97, 0xc87e, 0x11fc, 0x33a8,
	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
	0x16b3, 0x3d62, 0x4f34, 0x8518,
	0x1e2f, 0x391a, 0x5cac, 0xf858,
	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
	0x4397, 0xc27e, 0x17fc, 0x3ea8,
	0x1617, 0x3d3e, 0x6464, 0xb8b8,
	0x23ff, 0x12aa, 0xab6c, 0x56d8,
	0x2dfb, 0x1ba6, 0x913c, 0x7328,
	0x185d, 0x2ca6, 0x7914, 0x9e28,
	0x171b, 0x3e36, 0x7d7c, 0xebe8,
	0x4199, 0x82ee, 0x19f4, 0x2e58,
	0x4807, 0xc40e, 0x130c, 0x3208,
	0x1905, 0x2e0a, 0x5804, 0xac08,
	0x213f, 0x132a, 0xadfc, 0x5ba8,
	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};

static const u16 x8_vectors[] = {
	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};

static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
			   unsigned v_dim)
{
	unsigned int i, err_sym;

	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
		u16 s = syndrome;
		unsigned v_idx =  err_sym * v_dim;
		unsigned v_end = (err_sym + 1) * v_dim;

		/* walk over all 16 bits of the syndrome */
		for (i = 1; i < (1U << 16); i <<= 1) {

			/* if bit is set in that eigenvector... */
			if (v_idx < v_end && vectors[v_idx] & i) {
				u16 ev_comp = vectors[v_idx++];

				/* ... and bit set in the modified syndrome, */
				if (s & i) {
					/* remove it. */
					s ^= ev_comp;

					if (!s)
						return err_sym;
				}

			} else if (s & i)
				/* can't get to zero, move to next symbol */
				break;
		}
	}

	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
	return -1;
}

static int map_err_sym_to_channel(int err_sym, int sym_size)
{
	if (sym_size == 4)
		switch (err_sym) {
		case 0x20:
		case 0x21:
			return 0;
		case 0x22:
		case 0x23:
			return 1;
		default:
			return err_sym >> 4;
		}
	/* x8 symbols */
	else
		switch (err_sym) {
		/* imaginary bits not in a DIMM */
		case 0x10:
			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
					  err_sym);
			return -1;
		case 0x11:
			return 0;
		case 0x12:
			return 1;
		default:
			return err_sym >> 3;
		}
	return -1;
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int err_sym = -1;

	if (pvt->ecc_sym_sz == 8)
		err_sym = decode_syndrome(syndrome, x8_vectors,
					  ARRAY_SIZE(x8_vectors),
					  pvt->ecc_sym_sz);
	else if (pvt->ecc_sym_sz == 4)
		err_sym = decode_syndrome(syndrome, x4_vectors,
					  ARRAY_SIZE(x4_vectors),
					  pvt->ecc_sym_sz);
	else {
		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
		return err_sym;
	}

	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}

static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
			    u8 ecc_type)
{
	enum hw_event_mc_err_type err_type;
	const char *string;

	if (ecc_type == 2)
		err_type = HW_EVENT_ERR_CORRECTED;
	else if (ecc_type == 1)
		err_type = HW_EVENT_ERR_UNCORRECTED;
	else if (ecc_type == 3)
		err_type = HW_EVENT_ERR_DEFERRED;
	else {
		WARN(1, "Something is rotten in the state of Denmark.\n");
		return;
	}

	switch (err->err_code) {
	case DECODE_OK:
		string = "";
		break;
	case ERR_NODE:
		string = "Failed to map error addr to a node";
		break;
	case ERR_CSROW:
		string = "Failed to map error addr to a csrow";
		break;
	case ERR_CHANNEL:
		string = "Unknown syndrome - possible error reporting race";
		break;
	case ERR_SYND:
		string = "MCA_SYND not valid - unknown syndrome and csrow";
		break;
	case ERR_NORM_ADDR:
		string = "Cannot decode normalized address";
		break;
	default:
		string = "WTF error";
		break;
	}

	edac_mc_handle_error(err_type, mci, 1,
			     err->page, err->offset, err->syndrome,
			     err->csrow, err->channel, -1,
			     string, "");
}

static inline void decode_bus_error(int node_id, struct mce *m)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u8 ecc_type = (m->status >> 45) & 0x3;
	u8 xec = XEC(m->status, 0x1f);
	u16 ec = EC(m->status);
	u64 sys_addr;
	struct err_info err;

	mci = edac_mc_find(node_id);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	/* Bail out early if this was an 'observed' error */
	if (PP(ec) == NBSL_PP_OBS)
		return;

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;

	memset(&err, 0, sizeof(err));

	sys_addr = get_error_address(pvt, m);

	if (ecc_type == 2)
		err.syndrome = extract_syndrome(m->status);

	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);

	__log_ecc_error(mci, &err, ecc_type);
}

/*
 * To find the UMC channel represented by this bank we need to match on its
 * instance_id. The instance_id of a bank is held in the lower 32 bits of its
 * IPID.
 *
 * Currently, we can derive the channel number by looking at the 6th nibble in
 * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
 * number.
 */
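/*
 * For example, a (hypothetical) instance_id of 0x150000 decodes to channel 1,
 * since 0x150000 >> 20 == 0x1.
 */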
static int find_umc_channel(struct mce *m)
{
	return (m->ipid & GENMASK(31, 0)) >> 20;
}

static void decode_umc_error(int node_id, struct mce *m)
{
	u8 ecc_type = (m->status >> 45) & 0x3;
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	struct err_info err;
	u64 sys_addr;

	mci = edac_mc_find(node_id);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	memset(&err, 0, sizeof(err));

	if (m->status & MCI_STATUS_DEFERRED)
		ecc_type = 3;

	err.channel = find_umc_channel(m);

	if (!(m->status & MCI_STATUS_SYNDV)) {
		err.err_code = ERR_SYND;
		goto log_error;
	}

	if (ecc_type == 2) {
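		/*
		 * As decoded here: MCA_SYND[23:18] holds the syndrome length
		 * in bits and the syndrome value starts at bit 32; a zero
		 * length is treated as an unknown syndrome (ERR_CHANNEL).
		 */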
		u8 length = (m->synd >> 18) & 0x3f;

		if (length)
			err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
		else
			err.err_code = ERR_CHANNEL;
	}

	err.csrow = m->synd & 0x7;

	if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
		err.err_code = ERR_NORM_ADDR;
		goto log_error;
	}

	error_address_to_page_and_offset(sys_addr, &err);

log_error:
	__log_ecc_error(mci, &err, ecc_type);
}

/*
 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
 * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
 * Reserve F0 and F6 on systems with a UMC.
 */
static int
reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
{
	if (pvt->umc) {
		pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
		if (!pvt->F0) {
			edac_dbg(1, "F0 not found, device 0x%x\n", pci_id1);
			return -ENODEV;
		}

		pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
		if (!pvt->F6) {
			pci_dev_put(pvt->F0);
			pvt->F0 = NULL;

			edac_dbg(1, "F6 not found: device 0x%x\n", pci_id2);
			return -ENODEV;
		}

		if (!pci_ctl_dev)
			pci_ctl_dev = &pvt->F0->dev;

		edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
		edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
		edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));

		return 0;
	}

	/* Reserve the ADDRESS MAP Device */
	pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
	if (!pvt->F1) {
		edac_dbg(1, "F1 not found: device 0x%x\n", pci_id1);
		return -ENODEV;
	}

	/* Reserve the DCT Device */
	pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
	if (!pvt->F2) {
		pci_dev_put(pvt->F1);
		pvt->F1 = NULL;

		edac_dbg(1, "F2 not found: device 0x%x\n", pci_id2);
		return -ENODEV;
	}

	if (!pci_ctl_dev)
		pci_ctl_dev = &pvt->F2->dev;

	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));

	return 0;
}

static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
	if (pvt->umc) {
		pci_dev_put(pvt->F0);
		pci_dev_put(pvt->F6);
	} else {
		pci_dev_put(pvt->F1);
		pci_dev_put(pvt->F2);
	}
}

static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
{
	pvt->ecc_sym_sz = 4;

	if (pvt->umc) {
		u8 i;

		for_each_umc(i) {
			/* Check enabled channels only: */
			if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
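				/*
				 * As interpreted here: ECC_CTRL bit 9 selects
				 * x16 symbols, bit 7 selects x8; otherwise the
				 * x4 default set above is kept.
				 */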
				if (pvt->umc[i].ecc_ctrl & BIT(9)) {
					pvt->ecc_sym_sz = 16;
					return;
				} else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
					pvt->ecc_sym_sz = 8;
					return;
				}
			}
		}
	} else if (pvt->fam >= 0x10) {
		u32 tmp;

		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
		/* F16h has only DCT0, so no need to read dbam1. */
		if (pvt->fam != 0x16)
			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);

		/* F10h, revD and later can do x8 ECC too. */
		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
			pvt->ecc_sym_sz = 8;
	}
}

/*
 * Retrieve the hardware registers of the memory controller.
 */
static void __read_mc_regs_df(struct amd64_pvt *pvt)
{
	u8 nid = pvt->mc_node_id;
	struct amd64_umc *umc;
	u32 i, umc_base;

	/* Read registers from each UMC */
	for_each_umc(i) {

		umc_base = get_umc_base(i);
		umc = &pvt->umc[i];

		amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
		amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
		amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
		amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
		amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
	}
}

/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void read_mc_regs(struct amd64_pvt *pvt)
{
	unsigned int range;
	u64 msr_val;

	/*
	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
	 * those are Read-As-Zero.
	 */
	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);

	/* Check first whether TOP_MEM2 is enabled: */
	rdmsrl(MSR_AMD64_SYSCFG, msr_val);
	if (msr_val & BIT(21)) {
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
	} else {
		edac_dbg(0, "  TOP_MEM2 disabled\n");
	}

	if (pvt->umc) {
		__read_mc_regs_df(pvt);
		amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);

		goto skip;
	}

	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);

	read_dram_ctl_register(pvt);

	for (range = 0; range < DRAM_RANGES; range++) {
		u8 rw;

		/* read settings for this DRAM range */
		read_dram_base_limit_regs(pvt, range);

		rw = dram_rw(pvt, range);
		if (!rw)
			continue;

		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
			 range,
			 get_dram_base(pvt, range),
			 get_dram_limit(pvt, range));

		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
			 (rw & 0x1) ? "R" : "-",
			 (rw & 0x2) ? "W" : "-",
			 dram_intlv_sel(pvt, range),
			 dram_dst_node(pvt, range));
	}

	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);

	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);

	if (!dct_ganging_enabled(pvt)) {
		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
	}

skip:
	read_dct_base_mask(pvt);

	determine_memory_type(pvt);
	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);

	determine_ecc_sym_sz(pvt);
}

/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
 *	k8 private pointer to -->
 *			DRAM Bank Address mapping register
 *			node_id
 *			DCL register where dual_channel_active is
 *
 * The DBAM register consists of 4 sets of 4 bits each definitions:
 *
 * Bits:	CSROWs
 * 0-3		CSROWs 0 and 1
 * 4-7		CSROWs 2 and 3
 * 8-11		CSROWs 4 and 5
 * 12-15	CSROWs 6 and 7
 *
 * Values range from: 0 to 15
 * The meaning of the values depends on CPU revision and dual-channel state,
 * see the relevant BKDG for more info.
 *
 * The memory controller provides for a total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel or two (2) DIMMs in dual channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on CPU
 * revision.
 *
 * Returns:
 *	The number of PAGE_SIZE pages on the specified CSROW number it
 *	encompasses
 *
 */
static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
{
	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
	int csrow_nr = csrow_nr_orig;
	u32 cs_mode, nr_pages;

	if (!pvt->umc) {
		csrow_nr >>= 1;
		cs_mode = DBAM_DIMM(csrow_nr, dbam);
	} else {
		cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
	}

	nr_pages   = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
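	/*
	 * dbam_to_cs() returns MB; shifting by (20 - PAGE_SHIFT) converts
	 * that to PAGE_SIZE pages, e.g. 2048 MB -> 524288 pages with 4 KiB
	 * pages.
	 */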
	nr_pages <<= 20 - PAGE_SHIFT;

	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
		    csrow_nr_orig, dct,  cs_mode);
	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);

	return nr_pages;
}

static int init_csrows_df(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	enum edac_type edac_mode = EDAC_NONE;
	enum dev_type dev_type = DEV_UNKNOWN;
	struct dimm_info *dimm;
	int empty = 1;
	u8 umc, cs;

	if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
		edac_mode = EDAC_S16ECD16ED;
		dev_type = DEV_X16;
	} else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
		edac_mode = EDAC_S8ECD8ED;
		dev_type = DEV_X8;
	} else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
		edac_mode = EDAC_S4ECD4ED;
		dev_type = DEV_X4;
	} else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
		edac_mode = EDAC_SECDED;
	}

	for_each_umc(umc) {
		for_each_chip_select(cs, umc, pvt) {
			if (!csrow_enabled(cs, umc, pvt))
				continue;

			empty = 0;
			dimm = mci->csrows[cs]->channels[umc]->dimm;

			edac_dbg(1, "MC node: %d, csrow: %d\n",
					pvt->mc_node_id, cs);

			dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
			dimm->mtype = pvt->dram_type;
			dimm->edac_mode = edac_mode;
			dimm->dtype = dev_type;
			dimm->grain = 64;
		}
	}

	return empty;
}

/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static int init_csrows(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	enum edac_type edac_mode = EDAC_NONE;
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	int i, j, empty = 1;
	int nr_pages = 0;
	u32 val;

	if (pvt->umc)
		return init_csrows_df(mci);

	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);

	pvt->nbcfg = val;

	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
		 pvt->mc_node_id, val,
		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));

	/*
	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
	 */
	for_each_chip_select(i, 0, pvt) {
		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
		bool row_dct1 = false;

		if (pvt->fam != 0xf)
			row_dct1 = !!csrow_enabled(i, 1, pvt);

		if (!row_dct0 && !row_dct1)
			continue;

		csrow = mci->csrows[i];
		empty = 0;

		edac_dbg(1, "MC node: %d, csrow: %d\n",
			    pvt->mc_node_id, i);

		if (row_dct0) {
			nr_pages = get_csrow_nr_pages(pvt, 0, i);
			csrow->channels[0]->dimm->nr_pages = nr_pages;
		}

		/* K8 has only one DCT */
		if (pvt->fam != 0xf && row_dct1) {
			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);

			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
			nr_pages += row_dct1_pages;
		}

		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);

		/* Determine DIMM ECC mode: */
		if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
					? EDAC_S4ECD4ED
					: EDAC_SECDED;
		}

		for (j = 0; j < pvt->channel_count; j++) {
			dimm = csrow->channels[j]->dimm;
			dimm->mtype = pvt->dram_type;
			dimm->edac_mode = edac_mode;
			dimm->grain = 64;
		}
	}

	return empty;
}

/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (topology_die_id(cpu) == nid)
			cpumask_set_cpu(cpu, mask);
}

/* check MCG_CTL on all the cpus on this node */
static bool nb_mce_bank_enabled_on_node(u16 nid)
{
	cpumask_var_t mask;
	int cpu, nbe;
	bool ret = false;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_warn("%s: Error allocating mask\n", __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(mask, nid);

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		nbe = reg->l & MSR_MCGCTL_NBE;

		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			 cpu, reg->q,
			 (nbe ? "enabled" : "disabled"));

		if (!nbe)
			goto out;
	}
	ret = true;

out:
	free_cpumask_var(mask);
	return ret;
}

static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
{
	cpumask_var_t cmask;
	int cpu;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_warn("%s: error allocating mask\n", __func__);
		return -ENOMEM;
	}

	get_cpus_on_this_dct_cpumask(cmask, nid);

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {

		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (on) {
			if (reg->l & MSR_MCGCTL_NBE)
				s->flags.nb_mce_enable = 1;

			reg->l |= MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off before
			 */
			if (!s->flags.nb_mce_enable)
				reg->l &= ~MSR_MCGCTL_NBE;
		}
	}
	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);

	return 0;
}

static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
				       struct pci_dev *F3)
{
	bool ret = true;
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (toggle_ecc_err_reporting(s, nid, ON)) {
		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
		return false;
	}

	amd64_read_pci_cfg(F3, NBCTL, &value);

	s->old_nbctl   = value & mask;
	s->nbctl_valid = true;

	value |= mask;
	amd64_write_pci_cfg(F3, NBCTL, value);

	amd64_read_pci_cfg(F3, NBCFG, &value);

	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	if (!(value & NBCFG_ECC_ENABLE)) {
		amd64_warn("DRAM ECC disabled on this node, enabling...\n");

		s->flags.nb_ecc_prev = 0;

		/* Attempt to turn on DRAM ECC Enable */
		value |= NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);

		amd64_read_pci_cfg(F3, NBCFG, &value);

		if (!(value & NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, "
				   "check memory DIMM configuration.\n");
			ret = false;
3663
		} else {
3664
			amd64_info("Hardware accepted DRAM ECC Enable\n");
3665
		}
3666
	} else {
3667
		s->flags.nb_ecc_prev = 1;
3668
	}
3669

3670 3671
	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));
3672

3673
	return ret;
3674 3675
}

static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
					struct pci_dev *F3)
{
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, NBCFG, &value);
		value &= ~NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}

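/*
 * Check whether DRAM ECC is usable on this node: on UMC-based systems
 * (family 0x17 and later) every enabled UMC must report ECC enabled; on
 * older families check NBCFG[DramEccEn] and the NB MCE bank on all cores.
 */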
static bool ecc_enabled(struct amd64_pvt *pvt)
{
	u16 nid = pvt->mc_node_id;
	bool nb_mce_en = false;
	u8 ecc_en = 0, i;
	u32 value;

	if (boot_cpu_data.x86 >= 0x17) {
		u8 umc_en_mask = 0, ecc_en_mask = 0;
		struct amd64_umc *umc;

		for_each_umc(i) {
			umc = &pvt->umc[i];

			/* Only check enabled UMCs. */
			if (!(umc->sdp_ctrl & UMC_SDP_INIT))
				continue;

			umc_en_mask |= BIT(i);

			if (umc->umc_cap_hi & UMC_ECC_ENABLED)
				ecc_en_mask |= BIT(i);
		}

		/* Check whether at least one UMC is enabled: */
		if (umc_en_mask)
			ecc_en = umc_en_mask == ecc_en_mask;
		else
			edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);

		/* Assume UMC MCA banks are enabled. */
		nb_mce_en = true;
	} else {
		amd64_read_pci_cfg(pvt->F3, NBCFG, &value);

		ecc_en = !!(value & NBCFG_ECC_ENABLE);

		nb_mce_en = nb_mce_bank_enabled_on_node(nid);
		if (!nb_mce_en)
			edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
				     MSR_IA32_MCG_CTL, nid);
	}

	edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));

	if (!ecc_en || !nb_mce_en)
		return false;
	else
		return true;
}

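/*
 * Derive the EDAC control capabilities (SECDED, chipkill and symbol size)
 * from the common subset advertised by all initialized UMCs.
 */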
static inline void
f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{
	u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;

	for_each_umc(i) {
		if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
			ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
			cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);

			dev_x4  &= !!(pvt->umc[i].dimm_cfg & BIT(6));
			dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
		}
	}

	/* Set chipkill only if ECC is enabled: */
	if (ecc_en) {
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

		if (!cpk_en)
			return;

		if (dev_x4)
			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
		else if (dev_x16)
			mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
		else
			mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
	}
}

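/* Fill in the static mem_ctl_info attributes and the scrubber callbacks. */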
static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->umc) {
		f17h_determine_edac_ctl_cap(mci, pvt);
	} else {
		if (pvt->nbcap & NBCAP_SECDED)
			mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

		if (pvt->nbcap & NBCAP_CHIPKILL)
			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
	}

	mci->edac_cap		= determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->ctl_name		= fam_type->ctl_name;
	mci->dev_name		= pci_name(pvt->F3);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = set_scrub_rate;
	mci->get_sdram_scrub_rate = get_scrub_rate;
}

/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
{
	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
	pvt->stepping	= boot_cpu_data.x86_stepping;
	pvt->model	= boot_cpu_data.x86_model;
	pvt->fam	= boot_cpu_data.x86;

	switch (pvt->fam) {
	case 0xf:
		fam_type	= &family_types[K8_CPUS];
		pvt->ops	= &family_types[K8_CPUS].ops;
		break;

	case 0x10:
		fam_type	= &family_types[F10_CPUS];
		pvt->ops	= &family_types[F10_CPUS].ops;
		break;

	case 0x15:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F15_M30H_CPUS];
			pvt->ops = &family_types[F15_M30H_CPUS].ops;
			break;
		} else if (pvt->model == 0x60) {
			fam_type = &family_types[F15_M60H_CPUS];
			pvt->ops = &family_types[F15_M60H_CPUS].ops;
			break;
		/* Richland is only client */
		} else if (pvt->model == 0x13) {
			return NULL;
		} else {
			fam_type	= &family_types[F15_CPUS];
			pvt->ops	= &family_types[F15_CPUS].ops;
		}
		break;

	case 0x16:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F16_M30H_CPUS];
			pvt->ops = &family_types[F16_M30H_CPUS].ops;
			break;
		}
		fam_type	= &family_types[F16_CPUS];
		pvt->ops	= &family_types[F16_CPUS].ops;
		break;

	case 0x17:
		if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
			fam_type = &family_types[F17_M10H_CPUS];
			pvt->ops = &family_types[F17_M10H_CPUS].ops;
			break;
		} else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
			fam_type = &family_types[F17_M30H_CPUS];
			pvt->ops = &family_types[F17_M30H_CPUS].ops;
			break;
		} else if (pvt->model >= 0x60 && pvt->model <= 0x6f) {
			fam_type = &family_types[F17_M60H_CPUS];
			pvt->ops = &family_types[F17_M60H_CPUS].ops;
			break;
		} else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
			fam_type = &family_types[F17_M70H_CPUS];
			pvt->ops = &family_types[F17_M70H_CPUS].ops;
			break;
		}
		fallthrough;
	case 0x18:
		fam_type	= &family_types[F17_CPUS];
		pvt->ops	= &family_types[F17_CPUS].ops;

		if (pvt->fam == 0x18)
			family_types[F17_CPUS].ctl_name = "F18h";
		break;

	case 0x19:
		if (pvt->model >= 0x20 && pvt->model <= 0x2f) {
			fam_type = &family_types[F17_M70H_CPUS];
			pvt->ops = &family_types[F17_M70H_CPUS].ops;
			fam_type->ctl_name = "F19h_M20h";
			break;
		}
		fam_type	= &family_types[F19_CPUS];
		pvt->ops	= &family_types[F19_CPUS].ops;
		family_types[F19_CPUS].ctl_name = "F19h";
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	return fam_type;
}

static const struct attribute_group *amd64_edac_attr_groups[] = {
#ifdef CONFIG_EDAC_DEBUG
	&dbg_group,
	&inj_group,
#endif
	NULL
};

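/*
 * Gather the hardware state of this node: allocate the UMC array on family
 * 0x17 and later, reserve the sibling PCI functions and cache the MC
 * registers in @pvt.
 */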
static int hw_info_get(struct amd64_pvt *pvt)
{
	u16 pci_id1, pci_id2;
	int ret;

	if (pvt->fam >= 0x17) {
		pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
		if (!pvt->umc)
			return -ENOMEM;

		pci_id1 = fam_type->f0_id;
		pci_id2 = fam_type->f6_id;
	} else {
		pci_id1 = fam_type->f1_id;
		pci_id2 = fam_type->f2_id;
	}

	ret = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
	if (ret)
		return ret;

	read_mc_regs(pvt);

	return 0;
}

static void hw_info_put(struct amd64_pvt *pvt)
{
	if (pvt->F0 || pvt->F1)
		free_mc_sibling_devs(pvt);

	kfree(pvt->umc);
}

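/* Allocate a mem_ctl_info for this node and register it with the EDAC core. */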
static int init_one_instance(struct amd64_pvt *pvt)
{
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	int ret = -EINVAL;

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		return ret;

	ret = -ENOMEM;
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = pvt->csels[0].b_cnt;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;

	/*
	 * Always allocate two channels since we can have setups with DIMMs on
	 * only one channel. Also, this simplifies handling later for the price
	 * of a couple of KBs tops.
	 */
	layers[1].size = fam_type->max_mcs;
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		return ret;

	mci->pvt_info = pvt;
	mci->pdev = &pvt->F3->dev;

	setup_mci_misc_attrs(mci);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	ret = -ENODEV;
	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
		edac_dbg(1, "failed edac_mc_add_mc()\n");
		edac_mc_free(mci);
		return ret;
	}

	return 0;
}

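/* True if any chip select on any controller of this node is enabled. */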
static bool instance_has_memory(struct amd64_pvt *pvt)
{
	bool cs_enabled = false;
	int cs = 0, dct = 0;

	for (dct = 0; dct < fam_type->max_mcs; dct++) {
		for_each_chip_select(cs, dct, pvt)
			cs_enabled |= csrow_enabled(cs, dct, pvt);
	}

	return cs_enabled;
}

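/*
 * Set up one EDAC instance per node: pick the family descriptor, read the
 * hardware state, verify that ECC is usable (optionally forcing it on
 * pre-family-0x17 systems via ecc_enable_override) and register the memory
 * controller.
 */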
static int probe_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct amd64_pvt *pvt = NULL;
	struct ecc_settings *s;
	int ret;

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_settings;

	pvt->mc_node_id	= nid;
	pvt->F3 = F3;

	ret = -ENODEV;
	fam_type = per_family_init(pvt);
	if (!fam_type)
		goto err_enable;

	ret = hw_info_get(pvt);
	if (ret < 0)
		goto err_enable;

	ret = 0;
	if (!instance_has_memory(pvt)) {
		amd64_info("Node %d: No DIMMs detected.\n", nid);
		goto err_enable;
	}

	if (!ecc_enabled(pvt)) {
		ret = -ENODEV;

		if (!ecc_enable_override)
			goto err_enable;

		if (boot_cpu_data.x86 >= 0x17) {
			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.\n");
			goto err_enable;
		} else
			amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = init_one_instance(pvt);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);

		if (boot_cpu_data.x86 < 0x17)
			restore_ecc_error_reporting(s, nid, F3);

		goto err_enable;
	}

	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
		     (pvt->fam == 0xf ?
				(pvt->ext_model >= K8_REV_F  ? "revF or later "
							     : "revE or earlier ")
				 : ""), pvt->mc_node_id);

	dump_misc_regs(pvt);

	return ret;

err_enable:
	hw_info_put(pvt);
	kfree(pvt);

err_settings:
	kfree(s);
	ecc_stngs[nid] = NULL;

err_out:
	return ret;
}

static void remove_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&F3->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;

	hw_info_put(pvt);
	kfree(pvt);
	edac_mc_free(mci);
}

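/* Create the EDAC PCI control (once) used for PCI error reporting. */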
static void setup_pci_device(void)
{
	if (pci_ctl)
		return;

	pci_ctl = edac_pci_create_generic_ctl(pci_ctl_dev, EDAC_MOD_STR);
	if (!pci_ctl) {
		pr_warn("%s(): Unable to create PCI control\n", __func__);
		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
	}
}

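/* CPU families this driver binds to. */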
static const struct x86_cpu_id amd64_cpuids[] = {
	X86_MATCH_VENDOR_FAM(AMD,	0x0F, NULL),
	X86_MATCH_VENDOR_FAM(AMD,	0x10, NULL),
	X86_MATCH_VENDOR_FAM(AMD,	0x15, NULL),
	X86_MATCH_VENDOR_FAM(AMD,	0x16, NULL),
	X86_MATCH_VENDOR_FAM(AMD,	0x17, NULL),
	X86_MATCH_VENDOR_FAM(HYGON,	0x18, NULL),
	X86_MATCH_VENDOR_FAM(AMD,	0x19, NULL),
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);

static int __init amd64_edac_init(void)
{
	const char *owner;
	int err = -ENODEV;
	int i;

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	if (!x86_match_cpu(amd64_cpuids))
		return -ENODEV;

	if (amd_cache_northbridges() < 0)
		return -ENODEV;

	opstate_init();

	err = -ENOMEM;
	ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!ecc_stngs)
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	for (i = 0; i < amd_nb_num(); i++) {
		err = probe_one_instance(i);
		if (err) {
			/* unwind properly */
			while (--i >= 0)
				remove_one_instance(i);

			goto err_pci;
		}
	}

	if (!edac_has_mcs()) {
		err = -ENODEV;
		goto err_pci;
	}

	/* register stuff with EDAC MCE */
	if (boot_cpu_data.x86 >= 0x17)
		amd_register_ecc_decoder(decode_umc_error);
	else
		amd_register_ecc_decoder(decode_bus_error);

	setup_pci_device();

#ifdef CONFIG_X86_32
	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
#endif

	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);

	return 0;

err_pci:
	pci_ctl_dev = NULL;

	msrs_free(msrs);
	msrs = NULL;

err_free:
	kfree(ecc_stngs);
	ecc_stngs = NULL;

	return err;
}

static void __exit amd64_edac_exit(void)
{
	int i;

	if (pci_ctl)
		edac_pci_release_generic_ctl(pci_ctl);

	/* unregister from EDAC MCE */
	if (boot_cpu_data.x86 >= 0x17)
		amd_unregister_ecc_decoder(decode_umc_error);
	else
		amd_unregister_ecc_decoder(decode_bus_error);

	for (i = 0; i < amd_nb_num(); i++)
		remove_one_instance(i);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	pci_ctl_dev = NULL;

	msrs_free(msrs);
	msrs = NULL;
}

module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
		"Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		EDAC_AMD64_VERSION);

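/*
 * Example (hypothetical invocation; the module name follows the EDAC
 * Makefile and may differ per tree):
 *   modprobe amd64_edac_mod edac_op_state=0 ecc_enable_override=1
 */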
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");