#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *pci_ctl;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

/* Per-node stuff */
static struct ecc_settings **ecc_stngs;

/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching or
 * higher value'.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
static const struct scrubrate {
	u32 scrubval;		/* bit pattern for scrub rate */
	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x02, 800000000UL},
	{ 0x03, 400000000UL},
	{ 0x04, 200000000UL},
	{ 0x05, 100000000UL},
	{ 0x06, 50000000UL},
	{ 0x07, 25000000UL},
	{ 0x08, 12284069UL},
	{ 0x09, 6274509UL},
	{ 0x0A, 3121951UL},
	{ 0x0B, 1560975UL},
	{ 0x0C, 781440UL},
	{ 0x0D, 390720UL},
	{ 0x0E, 195300UL},
	{ 0x0F, 97650UL},
	{ 0x10, 48854UL},
	{ 0x11, 24427UL},
	{ 0x12, 12213UL},
	{ 0x13, 6101UL},
	{ 0x14, 3051UL},
	{ 0x15, 1523UL},
	{ 0x16, 761UL},
	{ 0x00, 0UL},        /* scrubbing off */
};

int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func)
{
	int err = 0;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err = 0;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= (pvt->model == 0x30) ? ~3 : ~1;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}

/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only and no address offsets >= 0x100
 *
 * F10h: each DCT has its own set of regs
 *	DCT0 -> F2x040..
 *	DCT1 -> F2x140..
 *
 * F16h: has only 1 DCT
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
					 int offset, u32 *val)
{
	switch (pvt->fam) {
	case 0xf:
		if (dct || offset >= 0x100)
			return -EINVAL;
		break;

	case 0x10:
		if (dct) {
			/*
			 * Note: If ganging is enabled, barring the regs
			 * F2x[1,0]98 and F2x[1,0]9C, reads to F2x1xx
			 * return 0. (cf. Section 2.8.1 F10h BKDG)
			 */
			if (dct_ganging_enabled(pvt))
				return 0;

			offset += 0x100;
		}
		break;

	case 0x15:
		/*
		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
		 * We should select which DCT we access using F1x10C[DctCfgSel].
		 */
		dct = (dct && pvt->model == 0x30) ? 3 : dct;
		f15h_select_dct(pvt, dct);
		break;

	case 0x16:
		if (dct)
			return -EINVAL;
		break;

	default:
		break;
	}

	return amd64_read_pci_cfg(pvt->F2, offset, val);
}

/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */

/*
 * scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If requested is too big, then use last maximum value found.
 */
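/*
 * For example: a requested bandwidth of 800000000 bytes/sec with the
 * default min_rate of 0x5 skips the not-recommended entries 0x01-0x04
 * and programs scrubval 0x05 (100000000 bytes/sec), while on K8, where
 * min_rate is 0x0, the exact match 0x02 (800000000 bytes/sec) is used.
 */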
static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater than or equal to the requested
	 * setting and program that. If at last entry, turn off DRAM scrubbing.
	 *
	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
	 * by falling back to the last element in scrubrates[].
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	scrubval = scrubrates[i].scrubval;

	if (pvt->fam == 0x15 && pvt->model == 0x60) {
		f15h_select_dct(pvt, 0);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
		f15h_select_dct(pvt, 1);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
	} else {
		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
	}

	if (scrubval)
		return scrubrates[i].bandwidth;

	return 0;
}

static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (pvt->fam == 0xf)
		min_scrubrate = 0x0;

	if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			min_scrubrate = 0x6;
	}
	return __set_scrub_rate(pvt, bw, min_scrubrate);
}

static int get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 scrubval = 0;
	int i, retval = -EINVAL;

	if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
	} else {
		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
	}

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}

/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
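	/*
	 * For example, a sign-extended sys_addr of 0xffffff8012345678
	 * (bit 39 set, bits 63-40 all ones) is reduced to 0x8012345678
	 * by the mask below.
	 */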
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}

/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	u8 node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers.  Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
		 (unsigned long)sys_addr);

	return NULL;
}

/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow];
		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
		addr_shift	= 4;

	/*
	 * F16h and F15h, models 30h and later need two addr_shift values:
	 * 8 for high and 6 for low (cf. F16h BKDG).
	 */
	} else if (pvt->fam == 0x16 ||
		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
		csbase          = pvt->csels[dct].csbases[csrow];
		csmask          = pvt->csels[dct].csmasks[csrow >> 1];

		*base  = (csbase & GENMASK_ULL(15,  5)) << 6;
		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;

		*mask = ~0ULL;
		/* poke holes for the csmask */
		*mask &= ~((GENMASK_ULL(15, 5)  << 6) |
			   (GENMASK_ULL(30, 19) << 8));

		*mask |= (csmask & GENMASK_ULL(15, 5))  << 6;
		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;

		return;
	} else {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
		addr_shift	= 8;

		if (pvt->fam == 0x15)
			base_bits = mask_bits =
				GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
		else
			base_bits = mask_bits =
				GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
	}

	*base  = (csbase & base_bits) << addr_shift;

	*mask  = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);
	/* OR them in */
	*mask |= (csmask & mask_bits) << addr_shift;
}
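/*
 * Worked example for the K8 rev<F case above (illustrative): a csbase
 * value with only bit 21 set (0x00200000) gives
 * *base = (0x00200000 & base_bits) << 4 = 0x02000000, i.e. DCSB bit 21
 * lands at address bit 25.
 */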

#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)

/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
				 (unsigned long)input_addr, csrow,
				 pvt->mc_node_id);

			return csrow;
		}
	}
	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		 (unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}

/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater.  In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	/* only revE and later have the DRAM Hole Address Register */
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
			 pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
			 pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */
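	/*
	 * Numeric example (illustrative): with dhar_base() = 0xc0000000,
	 * *hole_size = 2^32 - 0xc0000000 = 0x40000000 (1GB), and the
	 * relocated range runs from 0x100000000 through 0x13fffffff.
	 */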

	*hole_base = dhar_base(pvt);
	*hole_size = (1ULL << 32) - *hole_base;

	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
					: k8_dhar_offset(pvt);

	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)*hole_base,
		 (unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);

/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the Sysaddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				      &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ULL << 32)) &&
		    (sys_addr < ((1ULL << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
				 (unsigned long)sys_addr,
				 (unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;

	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)dram_addr);
	return dram_addr;
}

/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}
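/*
 * E.g. intlv_en == 0x3 maps to 2 interleave bits (4-node interleaving);
 * the only valid non-zero encodings are 1, 3 and 7, and values above 7
 * trip the BUG_ON() above.
 */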

/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
		      (dram_addr & 0xfff);

	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		 intlv_shift, (unsigned long)dram_addr,
		 (unsigned long)input_addr);

	return input_addr;
}
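/*
 * Worked example (illustrative): with two-node interleaving
 * (intlv_shift == 1), DramAddr 0x5000 becomes
 * ((0x5000 >> 1) & GENMASK_ULL(35, 12)) + 0x000 = 0x2000, i.e. each
 * node sees its every-other-4K pages as contiguous InputAddrs.
 */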

/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    struct err_info *err)
{
	err->page = (u32) (error_address >> PAGE_SHIFT);
	err->offset = ((u32) error_address) & ~PAGE_MASK;
}
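/*
 * E.g. with 4K pages (PAGE_SHIFT == 12), an error address of 0x12345678
 * splits into page 0x12345 and offset 0x678.
 */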

/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error.  mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
{
	u8 bit;
	unsigned long edac_cap = EDAC_FLAG_NONE;

	bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
		? 19
		: 17;

	if (pvt->dclr0 & BIT(bit))
		edac_cap = EDAC_FLAG_SECDED;

	return edac_cap;
}

static void debug_display_dimm_sizes(struct amd64_pvt *, u8);

static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	if (pvt->dram_type == MEM_LRDDR3) {
		u32 dcsm = pvt->csels[chan].csmasks[0];
		/*
		 * It's assumed all LRDIMMs in a DCT are going to be of
		 * same 'type' until proven otherwise. So, use a cs
		 * value of '0' here to get dcsm value.
		 */
		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
	}

	edac_dbg(1, "All DIMMs support ECC:%s\n",
		    (dclr & BIT(19)) ? "yes" : "no");

	edac_dbg(1, "  PAR/ERR parity: %s\n",
		 (dclr & BIT(8)) ?  "enabled" : "disabled");

	if (pvt->fam == 0x10)
		edac_dbg(1, "  DCT 128bit mode width: %s\n",
			 (dclr & BIT(11)) ?  "128b" : "64b");

	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		 (dclr & BIT(12)) ?  "yes" : "no",
		 (dclr & BIT(13)) ?  "yes" : "no",
		 (dclr & BIT(14)) ?  "yes" : "no",
		 (dclr & BIT(15)) ?  "yes" : "no");
}

/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);

	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt),
		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
				   : f10_dhar_offset(pvt));

	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (pvt->fam == 0xf)
		return;

	debug_display_dimm_sizes(pvt, 1);

	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}

/*
 * See BKDG, F2x[1,0][5C:40], F2x[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
}

/*
788
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
789
 */
790
static void read_dct_base_mask(struct amd64_pvt *pvt)
791
{
792
	int cs;
793

794
	prep_chip_selects(pvt);
795

796
	for_each_chip_select(cs, 0, pvt) {
797 798
		int reg0   = DCSB0 + (cs * 4);
		int reg1   = DCSB1 + (cs * 4);
799 800
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];
801

802
		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
803 804
			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
				 cs, *base0, reg0);
805

806
		if (pvt->fam == 0xf)
807
			continue;
808

809
		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
810
			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
811 812
				 cs, *base1, (pvt->fam == 0x10) ? reg1
								: reg0);
813 814
	}

815
	for_each_chip_select_mask(cs, 0, pvt) {
816 817
		int reg0   = DCSM0 + (cs * 4);
		int reg1   = DCSM1 + (cs * 4);
818 819
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];
820

821
		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
822 823
			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask0, reg0);
824

825
		if (pvt->fam == 0xf)
826
			continue;
827

828
		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
829
			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
830 831
				 cs, *mask1, (pvt->fam == 0x10) ? reg1
								: reg0);
832 833 834
	}
}

static void determine_memory_type(struct amd64_pvt *pvt)
{
	u32 dram_ctrl, dcsm;

	switch (pvt->fam) {
	case 0xf:
		if (pvt->ext_model >= K8_REV_F)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
		return;

	case 0x10:
		if (pvt->dchr0 & DDR3_MODE)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
		return;

	case 0x15:
		if (pvt->model < 0x60)
			goto ddr3;

		/*
		 * Model 0x60h needs special handling:
		 *
		 * We use a Chip Select value of '0' to obtain dcsm.
		 * Theoretically, it is possible to populate LRDIMMs of different
		 * 'Rank' value on a DCT. But this is not the common case. So,
		 * it's reasonable to assume all DIMMs are going to be of same
		 * 'type' until proven otherwise.
		 */
		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
		dcsm = pvt->csels[0].csmasks[0];

		if (((dram_ctrl >> 8) & 0x7) == 0x2)
			pvt->dram_type = MEM_DDR4;
		else if (pvt->dclr0 & BIT(16))
			pvt->dram_type = MEM_DDR3;
		else if (dcsm & 0x3)
			pvt->dram_type = MEM_LRDDR3;
		else
			pvt->dram_type = MEM_RDDR3;

		return;

	case 0x16:
		goto ddr3;

	default:
		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
		pvt->dram_type = MEM_EMPTY;
	}
	return;

ddr3:
	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
}

/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	/* not used */
	pvt->dclr1 = 0;

	return (flag) ? 2 : 1;
}

/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
	u16 mce_nid = amd_get_nb_id(m->extcpu);
	struct mem_ctl_info *mci;
	u8 start_bit = 1;
	u8 end_bit   = 47;
	u64 addr;

	mci = edac_mc_find(mce_nid);
	if (!mci)
		return 0;

	pvt = mci->pvt_info;

	if (pvt->fam == 0xf) {
		start_bit = 3;
		end_bit   = 39;
	}

	addr = m->addr & GENMASK_ULL(end_bit, start_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (pvt->fam == 0x15) {
		u64 cc6_base, tmp_addr;
		u32 tmp;
		u8 intlv_en;

		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
			return addr;

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
		intlv_en = tmp >> 21 & 0x7;

		/* add [47:27] + 3 trailing bits */
		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;

		/* reverse and add DramIntlvEn */
		cc6_base |= intlv_en ^ 0x7;

		/* pin at [47:24] */
		cc6_base <<= 24;

		if (!intlv_en)
			return cc6_base | (addr & GENMASK_ULL(23, 0));

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);

							/* faster log2 */
		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);

		/* OR DramIntlvSel into bits [14:12] */
		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;

		/* add remaining [11:0] bits from original MC4_ADDR */
		tmp_addr |= addr & GENMASK_ULL(11, 0);

		return cc6_base | tmp_addr;
	}

	return addr;
}

static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(vendor, device, dev))) {
		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
		    (dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
	}

	return dev;
}

static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
	struct amd_northbridge *nb;
	struct pci_dev *f1 = NULL;
	unsigned int pci_func;
	int off = range << 3;
	u32 llim;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (pvt->fam == 0xf)
		return;

	if (!dram_rw(pvt, range))
		return;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

	/* F15h: factor in CC6 save area by reading dst node's limit reg */
	if (pvt->fam != 0x15)
		return;

	nb = node_to_amd_nb(dram_dst_node(pvt, range));
	if (WARN_ON(!nb))
		return;

	if (pvt->model == 0x60)
		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
	else if (pvt->model == 0x30)
		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
	else
		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;

	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
	if (WARN_ON(!f1))
		return;

	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);

				    /* {[39:27],111b} */
	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);

				    /* [47:40] */
	pvt->ranges[range].lim.hi |= llim >> 13;

	pci_dev_put(f1);
}

static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!err->src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		err->err_code = ERR_NODE;
		return;
	}

	/* Now map the sys_addr to a CSROW */
	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/* CHIPKILL enabled */
	if (pvt->nbcfg & NBCFG_CHIPKILL) {
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
		if (err->channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
				      "possible error reporting race\n",
				      err->syndrome);
			err->err_code = ERR_CHANNEL;
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory.  This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		err->channel = ((sys_addr & BIT(3)) != 0);
	}
}

static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}
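/*
 * E.g. ddr2_cs_size(5, true): i is odd, so shift = (5 + 1) >> 1 = 3 and
 * the result is 128 << (3 + 1) = 2048 (MB) on a 128-bit wide DCT.
 */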

static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	if (pvt->ext_model >= K8_REV_F) {
		WARN_ON(cs_mode > 11);
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
	} else if (pvt->ext_model >= K8_REV_D) {
		unsigned diff;
		WARN_ON(cs_mode > 10);

		/*
		 * the below calculation, besides trying to win an obfuscated C
		 * contest, maps cs_mode values to DIMM chip select sizes. The
		 * mappings are:
		 *
		 * cs_mode	CS size (mb)
		 * =======	============
		 * 0		32
		 * 1		64
		 * 2		128
		 * 3		128
		 * 4		256
		 * 5		512
		 * 6		256
		 * 7		512
		 * 8		1024
		 * 9		1024
		 * 10		2048
		 *
		 * Basically, it calculates a value with which to shift the
		 * smallest CS size of 32MB.
		 *
		 * ddr[23]_cs_size have a similar purpose.
		 */
		diff = cs_mode/3 + (unsigned)(cs_mode > 5);

		return 32 << (cs_mode - diff);
	} else {
		WARN_ON(cs_mode > 6);
		return 32 << cs_mode;
	}
}

/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
	int i, j, channels = 0;

	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
		return 2;

	/*
	 * Need to check if in unganged mode: In such, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 * bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < 2; i++) {
		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels++;
				break;
			}
		}
	}

	if (channels > 2)
		channels = 2;

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}

static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}

static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i < 4 || i == 6)
		cs_size = -1;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = rank_multiply * (128 << shift);

	return cs_size;
}

static int ddr4_cs_size(unsigned i)
{
	int cs_size = 0;

	if (i == 0)
		cs_size = -1;
	else if (i == 1)
		cs_size = 1024;
	else
		/* Min cs_size = 1G */
		cs_size = 1024 * (1 << (i >> 1));

	return cs_size;
}
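/*
 * E.g. ddr4_cs_size(4) = 1024 * (1 << 2) = 4096 (MB); the size doubles
 * every second step of i, starting from the 1GB minimum.
 */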

static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}

/*
 * F15h supports only 64bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	return ddr3_cs_size(cs_mode, false);
}

/* F15h M60h supports DDR4 mapping as well.. */
static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
					unsigned cs_mode, int cs_mask_nr)
{
	int cs_size;
	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];

	WARN_ON(cs_mode > 12);

	if (pvt->dram_type == MEM_DDR4) {
		if (cs_mode > 9)
			return -1;

		cs_size = ddr4_cs_size(cs_mode);
	} else if (pvt->dram_type == MEM_LRDDR3) {
		unsigned rank_multiply = dcsm & 0xf;

		if (rank_multiply == 3)
			rank_multiply = 4;
		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
	} else {
		/* Minimum cs size is 512mb for F15h M60h */
		if (cs_mode == 0x1)
			return -1;

		cs_size = ddr3_cs_size(cs_mode, false);
	}

	return cs_size;
}

/*
 * F16h and F15h model 30h have only limited cs_modes.
 */
static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	if (cs_mode == 6 || cs_mode == 8 ||
	    cs_mode == 9 || cs_mode == 12)
		return -1;
	else
		return ddr3_cs_size(cs_mode, false);
}

static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf)
		return;

	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		edac_dbg(0, "  DCTs operate in %s mode\n",
			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			edac_dbg(0, "  Address range split per DCT: %s\n",
				 (dct_high_range_enabled(pvt) ? "yes" : "no"));

		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			 (dct_memory_cleared(pvt) ? "yes" : "no"));

		edac_dbg(0, "  channel interleave: %s, "
			 "interleave bits selector: 0x%x\n",
			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			 dct_sel_interleave_addr(pvt));
	}

	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
}

/*
 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
 * 2.10.12 Memory Interleaving Modes).
 */
static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				     u8 intlv_en, int num_dcts_intlv,
				     u32 dct_sel)
{
	u8 channel = 0;
	u8 select;

	if (!(intlv_en))
		return (u8)(dct_sel);

	if (num_dcts_intlv == 2) {
		select = (sys_addr >> 8) & 0x3;
		channel = select ? 0x3 : 0;
	} else if (num_dcts_intlv == 4) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);
		switch (intlv_addr) {
		case 0x4:
			channel = (sys_addr >> 8) & 0x3;
			break;
		case 0x5:
			channel = (sys_addr >> 9) & 0x3;
			break;
		}
	}
	return channel;
}

/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
{
	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		return 0;

	if (hi_range_sel)
		return dct_sel_high;

	/*
	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
	 */
	if (dct_interleave_enabled(pvt)) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		/* return DCT select function: 0=DCT0, 1=DCT1 */
		if (!intlv_addr)
			return sys_addr >> 6 & 1;

		if (intlv_addr & 0x2) {
			u8 shift = intlv_addr & 0x1 ? 9 : 6;
			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;

			return ((sys_addr >> shift) & 1) ^ temp;
		}

		if (intlv_addr & 0x4) {
			u8 shift = intlv_addr & 0x1 ? 9 : 8;

			return (sys_addr >> shift) & 1;
		}

		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
	}

	if (dct_high_range_enabled(pvt))
		return ~dct_sel_high & 1;

	return 0;
}
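/*
 * Example (illustrative): with channel interleaving enabled and
 * intlv_addr == 0 the select bit is sys_addr[6], so address 0x40 maps
 * to DCT1 and 0x80 back to DCT0 - consecutive 64-byte lines alternate
 * between the DCTs.
 */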

/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
				 u64 sys_addr, bool hi_rng,
				 u32 dct_sel_base_addr)
{
	u64 chan_off;
	u64 dram_base		= get_dram_base(pvt, range);
	u64 hole_off		= f10_dhar_offset(pvt);
	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;

	if (hi_rng) {
		/*
		 * if
		 * base address of high range is below 4Gb
		 * (bits [47:27] at [31:11])
		 * DRAM address space on this DCT is hoisted above 4Gb	&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole offset from sys_addr
		 * else
		 *	remove high range offset from sys_addr
		 */
		if ((!(dct_sel_base_addr >> 16) ||
		     dct_sel_base_addr < dhar_base(pvt)) &&
		    dhar_valid(pvt) &&
		    (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dct_sel_base_off;
	} else {
		/*
		 * if
		 * we have a valid hole		&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole
		 * else
		 *	remove dram base to normalize to DCT address
		 */
		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dram_base;
	}

	return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
}

/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * spare row
 */
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
{
	int tmp_cs;

	if (online_spare_swap_done(pvt, dct) &&
	    csrow == online_spare_bad_dramcs(pvt, dct)) {

		for_each_chip_select(tmp_cs, dct, pvt) {
			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
				csrow = tmp_cs;
				break;
			}
		}
	}
	return csrow;
}

/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u64 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = edac_mc_find(nid);
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);

	for_each_chip_select(csrow, dct, pvt) {
		if (!csrow_enabled(csrow, dct, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);

		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
			 csrow, cs_base, cs_mask);

		cs_mask = ~cs_mask;

		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
			 (in_addr & cs_mask), (cs_base & cs_mask));

		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
				cs_found =  csrow;
				break;
			}
			cs_found = f10_process_possible_spare(pvt, dct, csrow);

			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}

/*
 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
 * swapped with a region located at the bottom of memory so that the GPU can use
 * the interleaved region and thus two channels.
 */
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;

	if (pvt->fam == 0x10) {
		/* only revC3 and revE have that feature */
		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
			return sys_addr;
	}

	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);

	if (!(swap_reg & 0x1))
		return sys_addr;

	swap_base	= (swap_reg >> 3) & 0x7f;
	swap_limit	= (swap_reg >> 11) & 0x7f;
	rgn_size	= (swap_reg >> 20) & 0x7f;
	tmp_addr	= sys_addr >> 27;

	if (!(sys_addr >> 34) &&
	    (((tmp_addr >= swap_base) &&
	     (tmp_addr <= swap_limit)) ||
	     (tmp_addr < rgn_size)))
		return sys_addr ^ (u64)swap_base << 27;

	return sys_addr;
}

/* For a given @dram_range, check if @sys_addr falls within it. */
static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
				  u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	u64 chan_addr;
	u32 dct_sel_base;
	u8 channel;
	bool high_range = false;

	u8 node_id    = dram_dst_node(pvt, range);
	u8 intlv_en   = dram_intlv_en(pvt, range);
	u32 intlv_sel = dram_intlv_sel(pvt, range);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			    sys_addr);
		return -EINVAL;
	}

	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
		return -EINVAL;

	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);

	dct_sel_base = dct_sel_baseaddr(pvt);

	/*
	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
	 * select between DCT0 and DCT1.
	 */
	if (dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt) &&
	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
		high_range = true;

	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);

	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
					  high_range, dct_sel_base);

	/* Remove node interleaving, see F1x120 */
	if (intlv_en)
		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
			    (chan_addr & 0xfff);

	/* remove channel interleave */
	if (dct_interleave_enabled(pvt) &&
	   !dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt)) {

		if (dct_sel_interleave_addr(pvt) != 1) {
			if (dct_sel_interleave_addr(pvt) == 0x3)
				/* hash 9 */
				chan_addr = ((chan_addr >> 10) << 9) |
					     (chan_addr & 0x1ff);
			else
				/* A[6] or hash 6 */
				chan_addr = ((chan_addr >> 7) << 6) |
					     (chan_addr & 0x3f);
		} else
			/* A[12] */
			chan_addr = ((chan_addr >> 13) << 12) |
				     (chan_addr & 0xfff);
	}

	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);

	if (cs_found >= 0)
		*chan_sel = channel;

	return cs_found;
}

static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
					u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	int num_dcts_intlv = 0;
	u64 chan_addr, chan_offset;
	u64 dct_base, dct_limit;
	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;

	u64 dhar_offset		= f10_dhar_offset(pvt);
	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
	u8 node_id		= dram_dst_node(pvt, range);
	u8 intlv_en		= dram_intlv_en(pvt, range);

	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);

	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (!(get_dram_base(pvt, range)  <= sys_addr) &&
	    !(get_dram_limit(pvt, range) >= sys_addr))
		return -EINVAL;

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			    sys_addr);
		return -EINVAL;
	}

	/* Verify sys_addr is within DCT Range. */
	dct_base = (u64) dct_sel_baseaddr(pvt);
	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;

	if (!(dct_cont_base_reg & BIT(0)) &&
	    !(dct_base <= (sys_addr >> 27) &&
	      dct_limit >= (sys_addr >> 27)))
		return -EINVAL;

	/* Verify number of dct's that participate in channel interleaving. */
	num_dcts_intlv = (int) hweight8(intlv_en);

	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
		return -EINVAL;

	if (pvt->model >= 0x60)
		channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
	else
		channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
						     num_dcts_intlv, dct_sel);

	/* Verify we stay within the MAX number of channels allowed */
	if (channel > 3)
		return -EINVAL;

	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));

	/* Get normalized DCT addr */
	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
		chan_offset = dhar_offset;
	else
		chan_offset = dct_base << 27;

	chan_addr = sys_addr - chan_offset;

	/* remove channel interleave */
	if (num_dcts_intlv == 2) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 9) << 8) |
						(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 10) << 9) |
						(chan_addr & 0x1ff);
		else
			return -EINVAL;

	} else if (num_dcts_intlv == 4) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 10) << 8) |
							(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 11) << 9) |
							(chan_addr & 0x1ff);
		else
			return -EINVAL;
	}

	if (dct_offset_en) {
		amd64_read_pci_cfg(pvt->F1,
				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
				   &tmp);
		chan_addr +=  (u64) ((tmp >> 11) & 0xfff) << 27;
	}

	f15h_select_dct(pvt, channel);

	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);

	/*
	 * Find Chip select:
	 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
	 * there is support for 4 DCT's, but only 2 are currently functional.
	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
	 * pvt->csels[1]. So we need to use '1' here to get correct info.
	 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
	 */
	alias_channel =  (channel == 3) ? 1 : channel;

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);

	if (cs_found >= 0)
		*chan_sel = alias_channel;

	return cs_found;
}

static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
					u64 sys_addr,
					int *chan_sel)
{
	int cs_found = -EINVAL;
	unsigned range;

	for (range = 0; range < DRAM_RANGES; range++) {
		if (!dram_rw(pvt, range))
			continue;

		if (pvt->fam == 0x15 && pvt->model >= 0x30)
			cs_found = f15_m30h_match_to_this_node(pvt, range,
							       sys_addr,
							       chan_sel);

		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
			 (get_dram_limit(pvt, range) >= sys_addr)) {
			cs_found = f1x_match_to_this_node(pvt, range,
							  sys_addr, chan_sel);
			if (cs_found >= 0)
				break;
		}
	}
	return cs_found;
}

/*
1834 1835
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
1836
 *
1837 1838
 * The @sys_addr is usually an error address received from the hardware
 * (MCX_ADDR).
1839
 */
1840
static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1841
				     struct err_info *err)
1842 1843 1844
{
	struct amd64_pvt *pvt = mci->pvt_info;

1845
	error_address_to_page_and_offset(sys_addr, err);
1846

1847 1848 1849
	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
1850 1851 1852 1853 1854 1855 1856 1857
		return;
	}

	/*
	 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise @chan should already contain the channel at
	 * this point.
	 */
1858
	if (dct_ganging_enabled(pvt))
1859
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1860 1861 1862
}

/*
 * debug routine to display the memory sizes of all logical DIMMs and their
 * CSROWs
 */
static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
	int dimm, size0, size1;
	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;

	if (pvt->fam == 0xf) {
		/* K8 families < revF not supported yet */
		if (pvt->ext_model < K8_REV_F)
			return;
		else
			WARN_ON(ctrl != 0);
	}

	if (pvt->fam == 0x10) {
		dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
							   : pvt->dbam0;
		dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
				 pvt->csels[1].csbases :
				 pvt->csels[0].csbases;
	} else if (ctrl) {
		dbam = pvt->dbam0;
		dcsb = pvt->csels[1].csbases;
	}
	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
		 ctrl, dbam);

	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

	/* Dump memory sizes for DIMM and its CSROWs */
	for (dimm = 0; dimm < 4; dimm++) {

		size0 = 0;
		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
			/*
			 * For f15m60h, we need a multiplier for the LRDIMM
			 * cs_size calculation. We pass the 'dimm' value to
			 * the dbam_to_cs mapper so it can find the multiplier
			 * from the corresponding DCSM.
			 */
			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam),
						     dimm);

		size1 = 0;
		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam),
						     dimm);

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
				dimm * 2,     size0,
				dimm * 2 + 1, size1);
	}
}

static struct amd64_family_type family_types[] = {
	[K8_CPUS] = {
		.ctl_name = "K8",
		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
		.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.ops = {
			.early_channel_count	= k8_early_channel_count,
			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
			.dbam_to_cs		= k8_dbam_to_chip_select,
		}
	},
	[F10_CPUS] = {
		.ctl_name = "F10h",
		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
		.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f10_dbam_to_chip_select,
		}
	},
	[F15_CPUS] = {
		.ctl_name = "F15h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f15_dbam_to_chip_select,
		}
	},
	[F15_M30H_CPUS] = {
		.ctl_name = "F15h_M30h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F15_M60H_CPUS] = {
		.ctl_name = "F15h_M60h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f15_m60h_dbam_to_chip_select,
		}
	},
	[F16_CPUS] = {
		.ctl_name = "F16h",
		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F16_M30H_CPUS] = {
		.ctl_name = "F16h_M30h",
		.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
};
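
/*
 * Illustrative note: per_family_init() below picks one entry from this table,
 * after which all family-specific work is dispatched through pvt->ops; e.g.
 * on an F10h system, pvt->ops->dbam_to_cs() resolves to
 * f10_dbam_to_chip_select().
 */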

/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
static const u16 x4_vectors[] = {
	0x2f57, 0x1afe, 0x66cc, 0xdd88,
	0x11eb, 0x3396, 0x7f4c, 0xeac8,
	0x0001, 0x0002, 0x0004, 0x0008,
	0x1013, 0x3032, 0x4044, 0x8088,
	0x106b, 0x30d6, 0x70fc, 0xe0a8,
	0x4857, 0xc4fe, 0x13cc, 0x3288,
	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
	0x15c1, 0x2a42, 0x89ac, 0x4758,
	0x2b03, 0x1602, 0x4f0c, 0xca08,
	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
	0x8ba7, 0x465e, 0x244c, 0x1cc8,
	0x2b87, 0x164e, 0x642c, 0xdc18,
	0x40b9, 0x80de, 0x1094, 0x20e8,
	0x27db, 0x1eb6, 0x9dac, 0x7b58,
	0x11c1, 0x2242, 0x84ac, 0x4c58,
	0x1be5, 0x2d7a, 0x5e34, 0xa718,
	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
	0x4c97, 0xc87e, 0x11fc, 0x33a8,
	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
	0x16b3, 0x3d62, 0x4f34, 0x8518,
	0x1e2f, 0x391a, 0x5cac, 0xf858,
	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
	0x4397, 0xc27e, 0x17fc, 0x3ea8,
	0x1617, 0x3d3e, 0x6464, 0xb8b8,
	0x23ff, 0x12aa, 0xab6c, 0x56d8,
	0x2dfb, 0x1ba6, 0x913c, 0x7328,
	0x185d, 0x2ca6, 0x7914, 0x9e28,
	0x171b, 0x3e36, 0x7d7c, 0xebe8,
	0x4199, 0x82ee, 0x19f4, 0x2e58,
	0x4807, 0xc40e, 0x130c, 0x3208,
	0x1905, 0x2e0a, 0x5804, 0xac08,
	0x213f, 0x132a, 0xadfc, 0x5ba8,
	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};

static const u16 x8_vectors[] = {
	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};

static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
			   unsigned v_dim)
{
	unsigned int i, err_sym;

	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
		u16 s = syndrome;
		unsigned v_idx =  err_sym * v_dim;
		unsigned v_end = (err_sym + 1) * v_dim;

		/* walk over all 16 bits of the syndrome */
		for (i = 1; i < (1U << 16); i <<= 1) {

			/* if bit is set in that eigenvector... */
			if (v_idx < v_end && vectors[v_idx] & i) {
				u16 ev_comp = vectors[v_idx++];

				/* ... and bit set in the modified syndrome, */
				if (s & i) {
					/* remove it. */
					s ^= ev_comp;

					if (!s)
						return err_sym;
				}

			} else if (s & i)
				/* can't get to zero, move to next symbol */
				break;
		}
	}

	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
	return -1;
}
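
/*
 * Worked example (traced by hand, illustrative only):
 * decode_syndrome(0x0001, x4_vectors, ARRAY_SIZE(x4_vectors), 4) cannot
 * cancel the syndrome with the first two eigenvector groups, but the third
 * group { 0x0001, 0x0002, 0x0004, 0x0008 } zeroes it on bit 0, so the
 * function returns err_sym 2.
 */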

static int map_err_sym_to_channel(int err_sym, int sym_size)
{
	if (sym_size == 4)
		switch (err_sym) {
		case 0x20:
		case 0x21:
			return 0;
		case 0x22:
		case 0x23:
			return 1;
		default:
			return err_sym >> 4;
		}
	/* x8 symbols */
	else
		switch (err_sym) {
		/* imaginary bits not in a DIMM */
		case 0x10:
			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
					  err_sym);
			return -1;
		case 0x11:
			return 0;
		case 0x12:
			return 1;
		default:
			return err_sym >> 3;
		}
	return -1;
}
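
/*
 * For illustration: with x4 symbols, err_sym 0x22 hits the explicit case
 * above and maps to channel 1, while e.g. err_sym 0x35 falls through to the
 * default and yields 0x35 >> 4 = 3.
 */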

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int err_sym = -1;

	if (pvt->ecc_sym_sz == 8)
		err_sym = decode_syndrome(syndrome, x8_vectors,
					  ARRAY_SIZE(x8_vectors),
					  pvt->ecc_sym_sz);
	else if (pvt->ecc_sym_sz == 4)
		err_sym = decode_syndrome(syndrome, x4_vectors,
					  ARRAY_SIZE(x4_vectors),
					  pvt->ecc_sym_sz);
	else {
		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
		return err_sym;
	}

	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}

static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
			    u8 ecc_type)
{
	enum hw_event_mc_err_type err_type;
	const char *string;

	if (ecc_type == 2)
		err_type = HW_EVENT_ERR_CORRECTED;
	else if (ecc_type == 1)
		err_type = HW_EVENT_ERR_UNCORRECTED;
	else {
		WARN(1, "Something is rotten in the state of Denmark.\n");
		return;
	}

	switch (err->err_code) {
	case DECODE_OK:
		string = "";
		break;
	case ERR_NODE:
		string = "Failed to map error addr to a node";
		break;
	case ERR_CSROW:
		string = "Failed to map error addr to a csrow";
		break;
	case ERR_CHANNEL:
		string = "unknown syndrome - possible error reporting race";
		break;
	default:
		string = "WTF error";
		break;
	}

	edac_mc_handle_error(err_type, mci, 1,
			     err->page, err->offset, err->syndrome,
			     err->csrow, err->channel, -1,
			     string, "");
}

static inline void decode_bus_error(int node_id, struct mce *m)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u8 ecc_type = (m->status >> 45) & 0x3;
	u8 xec = XEC(m->status, 0x1f);
	u16 ec = EC(m->status);
	u64 sys_addr;
	struct err_info err;

	mci = edac_mc_find(node_id);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	/* Bail out early if this was an 'observed' error */
	if (PP(ec) == NBSL_PP_OBS)
		return;

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;

	memset(&err, 0, sizeof(err));

	sys_addr = get_error_address(pvt, m);

	if (ecc_type == 2)
		err.syndrome = extract_syndrome(m->status);

	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);

	__log_bus_error(mci, &err, ecc_type);
}

/*
 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
 * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
 */
static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f2_id)
{
	/* Reserve the ADDRESS MAP Device */
	pvt->F1 = pci_get_related_function(pvt->F3->vendor, f1_id, pvt->F3);
	if (!pvt->F1) {
		amd64_err("error address map device not found: vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f1_id);
		return -ENODEV;
	}

	/* Reserve the DCT Device */
	pvt->F2 = pci_get_related_function(pvt->F3->vendor, f2_id, pvt->F3);
	if (!pvt->F2) {
		pci_dev_put(pvt->F1);
		pvt->F1 = NULL;

		amd64_err("error F2 device not found: vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f2_id);

		return -ENODEV;
	}
	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));

	return 0;
}

static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
	pci_dev_put(pvt->F1);
	pci_dev_put(pvt->F2);
}

/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void read_mc_regs(struct amd64_pvt *pvt)
{
	unsigned range;
	u64 msr_val;
	u32 tmp;

	/*
	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
	 * those are Read-As-Zero
	 */
	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);

	/* check first whether TOP_MEM2 is enabled */
	rdmsrl(MSR_K8_SYSCFG, msr_val);
	if (msr_val & (1U << 21)) {
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
	} else
		edac_dbg(0, "  TOP_MEM2 disabled\n");

	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);

	read_dram_ctl_register(pvt);

	for (range = 0; range < DRAM_RANGES; range++) {
		u8 rw;

		/* read settings for this DRAM range */
		read_dram_base_limit_regs(pvt, range);

		rw = dram_rw(pvt, range);
		if (!rw)
			continue;

		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
			 range,
			 get_dram_base(pvt, range),
			 get_dram_limit(pvt, range));

		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
			 (rw & 0x1) ? "R" : "-",
			 (rw & 0x2) ? "W" : "-",
			 dram_intlv_sel(pvt, range),
			 dram_dst_node(pvt, range));
	}

	read_dct_base_mask(pvt);

	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);

	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);

	if (!dct_ganging_enabled(pvt)) {
		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
	}

	pvt->ecc_sym_sz = 4;
	determine_memory_type(pvt);
	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);

	if (pvt->fam >= 0x10) {
		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
		/* F16h has only DCT0, so no need to read dbam1 */
		if (pvt->fam != 0x16)
			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);

		/* F10h, revD and later can do x8 ECC too */
		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
			pvt->ecc_sym_sz = 8;
	}
	dump_misc_regs(pvt);
}

/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
 *	k8 private pointer to -->
 *			DRAM Bank Address mapping register
 *			node_id
 *			DCL register where dual_channel_active is
 *
 * The DBAM register consists of 4 sets of 4 bits each:
 *
 * Bits:	CSROWs
 * 0-3		CSROWs 0 and 1
 * 4-7		CSROWs 2 and 3
 * 8-11		CSROWs 4 and 5
 * 12-15	CSROWs 6 and 7
 *
 * Values range from: 0 to 15
 * The meaning of the values depends on CPU revision and dual-channel state,
 * see the relevant BKDG for more info.
 *
 * The memory controller provides for a total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel or two (2) DIMMs in dual channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on CPU
 * revision.
 *
 * Returns:
 *	The number of PAGE_SIZE pages the specified CSROW number
 *	encompasses
 *
 */
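/*
 * Example (follows from the table above): for csrow_nr 4 or 5 the cs_mode
 * nibble lives in DBAM bits 8-11, so DBAM_DIMM(4 / 2, dbam) extracts
 * (dbam >> 8) & 0xF.
 */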
static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
	u32 cs_mode, nr_pages;
	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;

	/*
	 * The math on this doesn't look right on the surface because x/2*4 can
	 * be simplified to x*2 but this expression makes use of the fact that
	 * it is integral math where 1/2=0. This intermediate value becomes the
	 * number of bits to shift the DBAM register to extract the proper CSROW
	 * field.
	 */
	cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);

	nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, (csrow_nr / 2))
							   << (20 - PAGE_SHIFT);

	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
		    csrow_nr, dct,  cs_mode);
	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);

	return nr_pages;
}
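
/*
 * For illustration: dbam_to_cs() returns the chip select size in MB, so a
 * 2048 MB chip select with 4K pages yields 2048 << (20 - 12) = 524288 pages.
 */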

/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static int init_csrows(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	enum edac_type edac_mode;
	int i, j, empty = 1;
	int nr_pages = 0;
	u32 val;

	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);

	pvt->nbcfg = val;

	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
		 pvt->mc_node_id, val,
		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));

	/*
	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
	 */
	for_each_chip_select(i, 0, pvt) {
		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
		bool row_dct1 = false;

		if (pvt->fam != 0xf)
			row_dct1 = !!csrow_enabled(i, 1, pvt);

		if (!row_dct0 && !row_dct1)
			continue;

		csrow = mci->csrows[i];
		empty = 0;

		edac_dbg(1, "MC node: %d, csrow: %d\n",
			    pvt->mc_node_id, i);

		if (row_dct0) {
			nr_pages = get_csrow_nr_pages(pvt, 0, i);
			csrow->channels[0]->dimm->nr_pages = nr_pages;
		}

		/* K8 has only one DCT */
		if (pvt->fam != 0xf && row_dct1) {
			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);

			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
			nr_pages += row_dct1_pages;
		}

		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);

		/*
		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
		 */
		if (pvt->nbcfg & NBCFG_ECC_ENABLE)
			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
				    EDAC_S4ECD4ED : EDAC_SECDED;
		else
			edac_mode = EDAC_NONE;

		for (j = 0; j < pvt->channel_count; j++) {
			dimm = csrow->channels[j]->dimm;
			dimm->mtype = pvt->dram_type;
			dimm->edac_mode = edac_mode;
		}
	}

	return empty;
}

/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (amd_get_nb_id(cpu) == nid)
			cpumask_set_cpu(cpu, mask);
}

/* check MCG_CTL on all the cpus on this node */
static bool nb_mce_bank_enabled_on_node(u16 nid)
{
	cpumask_var_t mask;
	int cpu, nbe;
	bool ret = false;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_warn("%s: Error allocating mask\n", __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(mask, nid);

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		nbe = reg->l & MSR_MCGCTL_NBE;

		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			 cpu, reg->q,
			 (nbe ? "enabled" : "disabled"));

		if (!nbe)
			goto out;
	}
	ret = true;

out:
	free_cpumask_var(mask);
	return ret;
}
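
/*
 * For example, if every core on the node has MCG_CTL[4] (MSR_MCGCTL_NBE)
 * set, the loop above never takes the !nbe early exit and the function
 * returns true; a single core with the bit clear is enough to report the
 * NB MCE bank as disabled.
 */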

static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
{
	cpumask_var_t cmask;
	int cpu;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_warn("%s: error allocating mask\n", __func__);
		return -ENOMEM;
	}

	get_cpus_on_this_dct_cpumask(cmask, nid);

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {

		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (on) {
			if (reg->l & MSR_MCGCTL_NBE)
				s->flags.nb_mce_enable = 1;

			reg->l |= MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off before
			 */
			if (!s->flags.nb_mce_enable)
				reg->l &= ~MSR_MCGCTL_NBE;
		}
	}
	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);

	return 0;
}

static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
				       struct pci_dev *F3)
{
	bool ret = true;
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (toggle_ecc_err_reporting(s, nid, ON)) {
		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
		return false;
	}

	amd64_read_pci_cfg(F3, NBCTL, &value);

	s->old_nbctl   = value & mask;
	s->nbctl_valid = true;

	value |= mask;
	amd64_write_pci_cfg(F3, NBCTL, value);

	amd64_read_pci_cfg(F3, NBCFG, &value);

	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	if (!(value & NBCFG_ECC_ENABLE)) {
		amd64_warn("DRAM ECC disabled on this node, enabling...\n");

		s->flags.nb_ecc_prev = 0;

		/* Attempt to turn on DRAM ECC Enable */
		value |= NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);

		amd64_read_pci_cfg(F3, NBCFG, &value);

		if (!(value & NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, check memory DIMM configuration.\n");
			ret = false;
		} else {
			amd64_info("Hardware accepted DRAM ECC Enable\n");
		}
	} else {
		s->flags.nb_ecc_prev = 1;
	}

	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	return ret;
}

static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
					struct pci_dev *F3)
{
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, NBCFG, &value);
		value &= ~NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}

/*
 * EDAC requires that the BIOS have ECC enabled before
 * taking over the processing of ECC errors. A command line
 * option allows force-enabling hardware ECC later in
 * enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";

static bool ecc_enabled(struct pci_dev *F3, u16 nid)
{
	u32 value;
	u8 ecc_en = 0;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(F3, NBCFG, &value);

	ecc_en = !!(value & NBCFG_ECC_ENABLE);
	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));

	nb_mce_en = nb_mce_bank_enabled_on_node(nid);
	if (!nb_mce_en)
		amd64_notice("NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
			     MSR_IA32_MCG_CTL, nid);

	if (!ecc_en || !nb_mce_en) {
		amd64_notice("%s", ecc_msg);
		return false;
	}
	return true;
}

static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
				 struct amd64_family_type *fam)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap		= determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= fam->ctl_name;
	mci->dev_name		= pci_name(pvt->F3);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = set_scrub_rate;
	mci->get_sdram_scrub_rate = get_scrub_rate;
}

/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
{
	struct amd64_family_type *fam_type = NULL;

	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
	pvt->stepping	= boot_cpu_data.x86_mask;
	pvt->model	= boot_cpu_data.x86_model;
	pvt->fam	= boot_cpu_data.x86;

	switch (pvt->fam) {
	case 0xf:
		fam_type	= &family_types[K8_CPUS];
		pvt->ops	= &family_types[K8_CPUS].ops;
		break;

	case 0x10:
		fam_type	= &family_types[F10_CPUS];
		pvt->ops	= &family_types[F10_CPUS].ops;
		break;

	case 0x15:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F15_M30H_CPUS];
			pvt->ops = &family_types[F15_M30H_CPUS].ops;
			break;
		} else if (pvt->model == 0x60) {
			fam_type = &family_types[F15_M60H_CPUS];
			pvt->ops = &family_types[F15_M60H_CPUS].ops;
			break;
		}

		fam_type	= &family_types[F15_CPUS];
		pvt->ops	= &family_types[F15_CPUS].ops;
		break;

	case 0x16:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F16_M30H_CPUS];
			pvt->ops = &family_types[F16_M30H_CPUS].ops;
			break;
		}
		fam_type	= &family_types[F16_CPUS];
		pvt->ops	= &family_types[F16_CPUS].ops;
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
		     (pvt->fam == 0xf ?
				(pvt->ext_model >= K8_REV_F  ? "revF or later "
							     : "revE or earlier ")
				 : ""), pvt->mc_node_id);
	return fam_type;
}

static const struct attribute_group *amd64_edac_attr_groups[] = {
#ifdef CONFIG_EDAC_DEBUG
	&amd64_edac_dbg_group,
#endif
#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
	&amd64_edac_inj_group,
#endif
	NULL
};

static int init_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct amd64_family_type *fam_type = NULL;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	struct amd64_pvt *pvt = NULL;
	int err = 0, ret;

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_ret;

	pvt->mc_node_id	= nid;
	pvt->F3 = F3;

	ret = -EINVAL;
	fam_type = per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	ret = -ENODEV;
	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f2_id);
	if (err)
		goto err_free;

	read_mc_regs(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	ret = -EINVAL;
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_siblings;

	ret = -ENOMEM;
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = pvt->csels[0].b_cnt;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;

	/*
	 * Always allocate two channels since we can have setups with DIMMs on
	 * only one channel. Also, this simplifies handling later for the price
	 * of a couple of KBs tops.
	 */
	layers[1].size = 2;
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		goto err_siblings;

	mci->pvt_info = pvt;
	mci->pdev = &pvt->F3->dev;

	setup_mci_misc_attrs(mci, fam_type);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	ret = -ENODEV;
	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
		edac_dbg(1, "failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(decode_bus_error);

	return 0;

err_add_mc:
	edac_mc_free(mci);

err_siblings:
	free_mc_sibling_devs(pvt);

err_free:
	kfree(pvt);

err_ret:
	return ret;
}
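
/*
 * Illustrative note: with the layers[] above and, e.g., pvt->csels[0].b_cnt
 * == 8 chip selects, edac_mc_alloc() instantiates mci->csrows[0..7], each
 * with two channels[] slots; this is exactly the shape init_csrows() fills.
 */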

static int probe_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s;
	int ret;

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	if (!ecc_enabled(F3, nid)) {
		ret = -ENODEV;

		if (!ecc_enable_override)
			goto err_enable;

		amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = init_one_instance(nid);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);
		restore_ecc_error_reporting(s, nid, F3);
	}

	return ret;

err_enable:
	kfree(s);
	ecc_stngs[nid] = NULL;

err_out:
	return ret;
}

static void remove_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	mci = find_mci_by_dev(&F3->dev);
	WARN_ON(!mci);

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&F3->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	free_mc_sibling_devs(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(decode_bus_error);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}

static void setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (pci_ctl)
		return;

	mci = edac_mc_find(0);
	if (!mci)
		return;

	pvt = mci->pvt_info;
	pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
	if (!pci_ctl) {
		pr_warn("%s(): Unable to create PCI control\n", __func__);
		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
	}
}

static const struct x86_cpu_id amd64_cpuids[] = {
	{ X86_VENDOR_AMD, 0xF,	X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x10, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x15, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);

static int __init amd64_edac_init(void)
{
	int err = -ENODEV;
	int i;

	if (amd_cache_northbridges() < 0)
		goto err_ret;

	opstate_init();

	err = -ENOMEM;
	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!ecc_stngs)
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	for (i = 0; i < amd_nb_num(); i++)
		if (probe_one_instance(i)) {
			/* unwind properly */
			while (--i >= 0)
				remove_one_instance(i);

			goto err_pci;
		}

	setup_pci_device();

#ifdef CONFIG_X86_32
	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
#endif

	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);

	return 0;

err_pci:
	msrs_free(msrs);
	msrs = NULL;

err_free:
	kfree(ecc_stngs);
	ecc_stngs = NULL;

err_ret:
	return err;
}

static void __exit amd64_edac_exit(void)
{
	int i;

	if (pci_ctl)
		edac_pci_release_generic_ctl(pci_ctl);

	for (i = 0; i < amd_nb_num(); i++)
		remove_one_instance(i);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	msrs_free(msrs);
	msrs = NULL;
}

module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
		"Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");