/*
 * Freescale Memory Controller kernel module
 *
 * Support Power-based SoCs including MPC85xx, MPC86xx, MPC83xx and
 * ARM-based Layerscape SoCs including LS2xxx. Originally split
 * out from the mpc85xx_edac EDAC driver.
 *
 * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
 *
 * Author: Dave Jiang <djiang@mvista.com>
 *
 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/edac.h>
#include <linux/smp.h>
#include <linux/gfp.h>

#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include "edac_module.h"
#include "fsl_ddr_edac.h"

#define EDAC_MOD_STR	"fsl_ddr_edac"

static int edac_mc_idx;

static u32 orig_ddr_err_disable;
static u32 orig_ddr_err_sbe;
static bool little_endian;

static inline u32 ddr_in32(void __iomem *addr)
{
	return little_endian ? ioread32(addr) : ioread32be(addr);
}

static inline void ddr_out32(void __iomem *addr, u32 value)
{
	if (little_endian)
		iowrite32(value, addr);
	else
		iowrite32be(value, addr);
}

/************************ MC SYSFS parts ***********************************/

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
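
/*
 * The inject_data_hi, inject_data_lo and inject_ctrl sysfs files map
 * directly onto the controller's error-injection registers
 * (FSL_MC_DATA_ERR_INJECT_HI/LO and FSL_MC_ECC_ERR_INJECT): values written
 * here are programmed into the hardware unmodified and reads return the
 * current register contents.  A rough usage sketch (the exact path depends
 * on how the EDAC core lays out sysfs, typically
 * /sys/devices/system/edac/mc/mc0/):
 *
 *   echo 0x00000001 > inject_data_lo   # bit mask for the low data word
 *   cat inject_ctrl                    # read back the ECC injection control
 */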

static ssize_t fsl_mc_inject_data_hi_show(struct device *dev,
					  struct device_attribute *mattr,
					  char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		       ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI));
}

static ssize_t fsl_mc_inject_data_lo_show(struct device *dev,
					  struct device_attribute *mattr,
					  char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		       ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO));
}

static ssize_t fsl_mc_inject_ctrl_show(struct device *dev,
				       struct device_attribute *mattr,
				       char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		       ddr_in32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT));
}

static ssize_t fsl_mc_inject_data_hi_store(struct device *dev,
					   struct device_attribute *mattr,
					   const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	unsigned long val;
	int rc;

	if (isdigit(*data)) {
		rc = kstrtoul(data, 0, &val);
		if (rc)
			return rc;

		ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI, val);
		return count;
	}
	return 0;
}

static ssize_t fsl_mc_inject_data_lo_store(struct device *dev,
					   struct device_attribute *mattr,
					   const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	unsigned long val;
	int rc;

	if (isdigit(*data)) {
		rc = kstrtoul(data, 0, &val);
		if (rc)
			return rc;

		ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO, val);
		return count;
	}
	return 0;
}

static ssize_t fsl_mc_inject_ctrl_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	unsigned long val;
	int rc;

	if (isdigit(*data)) {
		rc = kstrtoul(data, 0, &val);
		if (rc)
			return rc;

		ddr_out32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT, val);
		return count;
	}
	return 0;
}

static DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
		   fsl_mc_inject_data_hi_show, fsl_mc_inject_data_hi_store);
static DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
		   fsl_mc_inject_data_lo_show, fsl_mc_inject_data_lo_store);
static DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
		   fsl_mc_inject_ctrl_show, fsl_mc_inject_ctrl_store);

static struct attribute *fsl_ddr_dev_attrs[] = {
	&dev_attr_inject_data_hi.attr,
	&dev_attr_inject_data_lo.attr,
	&dev_attr_inject_ctrl.attr,
	NULL
};

ATTRIBUTE_GROUPS(fsl_ddr_dev);

/**************************** MC Err device ***************************/

/*
 * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
 * MPC8572 User's Manual.  Each line represents a syndrome bit column as a
 * 64-bit value, but split into an upper and lower 32-bit chunk.  The labels
 * below correspond to Freescale's manuals.
 */
static unsigned int ecc_table[16] = {
	/* MSB           LSB */
	/* [0:31]    [32:63] */
	0xf00fe11e, 0xc33c0ff7,	/* Syndrome bit 7 */
	0x00ff00ff, 0x00fff0ff,
	0x0f0f0f0f, 0x0f0fff00,
	0x11113333, 0x7777000f,
	0x22224444, 0x8888222f,
	0x44448888, 0xffff4441,
	0x8888ffff, 0x11118882,
	0xffff1111, 0x22221114,	/* Syndrome bit 0 */
};

/*
 * Calculate the correct ECC value for a 64-bit value specified by high:low
 */
static u8 calculate_ecc(u32 high, u32 low)
{
	u32 mask_low;
	u32 mask_high;
	int bit_cnt;
	u8 ecc = 0;
	int i;
	int j;

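	/*
	 * Each of the eight ECC check bits is the even parity (XOR) of the
	 * data bits selected by one 64-bit mask from ecc_table, stored as a
	 * pair of 32-bit words covering the high and low data halves.
	 */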
	for (i = 0; i < 8; i++) {
		mask_high = ecc_table[i * 2];
		mask_low = ecc_table[i * 2 + 1];
		bit_cnt = 0;

		for (j = 0; j < 32; j++) {
			if ((mask_high >> j) & 1)
				bit_cnt ^= (high >> j) & 1;
			if ((mask_low >> j) & 1)
				bit_cnt ^= (low >> j) & 1;
		}

		ecc |= bit_cnt << i;
	}

	return ecc;
}

/*
 * Create the syndrome code which is generated if the data line specified by
 * 'bit' failed, e.g. generate the 8-bit codes seen in Table 8-55 in the MPC8641
 * User's Manual and 9-61 in the MPC8572 User's Manual.
 */
static u8 syndrome_from_bit(unsigned int bit)
{
	int i;
	u8 syndrome = 0;

	/*
	 * Cycle through the upper or lower 32-bit portion of each value in
	 * ecc_table depending on if 'bit' is in the upper or lower half of
	 * 64-bit data.
	 */
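	/*
	 * 'bit < 32' evaluates to 1 for bits held in the low data word, so
	 * the loop walks the odd-indexed (low-word) table entries; bits
	 * 32-63 start at index 0 and walk the even-indexed (high-word)
	 * entries instead.
	 */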
	for (i = bit < 32; i < 16; i += 2)
		syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);

	return syndrome;
}

/*
 * Decode data and ecc syndrome to determine what went wrong
 * Note: This can only decode single-bit errors
 */
static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
		       int *bad_data_bit, int *bad_ecc_bit)
{
	int i;
	u8 syndrome;

	*bad_data_bit = -1;
	*bad_ecc_bit = -1;

	/*
	 * Calculate the ECC of the captured data and XOR it with the captured
	 * ECC to find an ECC syndrome value we can search for
	 */
	syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;

	/* Check if a data line is stuck... */
	for (i = 0; i < 64; i++) {
		if (syndrome == syndrome_from_bit(i)) {
			*bad_data_bit = i;
			return;
		}
	}

	/* If data is correct, check ECC bits for errors... */
	for (i = 0; i < 8; i++) {
		if ((syndrome >> i) & 0x1) {
			*bad_ecc_bit = i;
			return;
		}
	}
}

#define make64(high, low) (((u64)(high) << 32) | (low))

static void fsl_mc_check(struct mem_ctl_info *mci)
{
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	struct csrow_info *csrow;
	u32 bus_width;
	u32 err_detect;
	u32 syndrome;
	u64 err_addr;
	u32 pfn;
	int row_index;
	u32 cap_high;
	u32 cap_low;
	int bad_data_bit;
	int bad_ecc_bit;

	err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
	if (!err_detect)
		return;

	fsl_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
		      err_detect);

	/* no more processing if this is not an ECC bit error */
	if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
		return;
	}

	syndrome = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ECC);

	/* Mask off appropriate bits of syndrome based on bus width */
	bus_width = (ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG) &
		     DSC_DBW_MASK) ? 32 : 64;
	if (bus_width == 64)
		syndrome &= 0xff;
	else
		syndrome &= 0xffff;

	err_addr = make64(
		ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_EXT_ADDRESS),
		ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ADDRESS));
	pfn = err_addr >> PAGE_SHIFT;

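	/* Find the chip-select row whose page range contains the faulting PFN */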
	for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
		csrow = mci->csrows[row_index];
		if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
			break;
	}

	cap_high = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_HI);
	cap_low = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_LO);

	/*
	 * Analyze single-bit errors on 64-bit wide buses
	 * TODO: Add support for 32-bit wide buses
	 */
	if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
		sbe_ecc_decode(cap_high, cap_low, syndrome,
				&bad_data_bit, &bad_ecc_bit);

		if (bad_data_bit != -1)
			fsl_mc_printk(mci, KERN_ERR,
				"Faulty Data bit: %d\n", bad_data_bit);
		if (bad_ecc_bit != -1)
			fsl_mc_printk(mci, KERN_ERR,
				"Faulty ECC bit: %d\n", bad_ecc_bit);

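		/*
		 * Reconstruct the expected value by flipping the identified
		 * bad bit back; the guarded shifts below leave a word
		 * untouched when the corresponding bit index is -1 or falls
		 * in the other half.
		 */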
		fsl_mc_printk(mci, KERN_ERR,
			"Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
			cap_high ^ (1 << (bad_data_bit - 32)),
			cap_low ^ (1 << bad_data_bit),
			syndrome ^ (1 << bad_ecc_bit));
	}

	fsl_mc_printk(mci, KERN_ERR,
			"Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
			cap_high, cap_low, syndrome);
	fsl_mc_printk(mci, KERN_ERR, "Err addr: %#8.8llx\n", err_addr);
	fsl_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);

	/* we are out of range */
	if (row_index == mci->nr_csrows)
		fsl_mc_printk(mci, KERN_ERR, "PFN out of range!\n");

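	/*
	 * Report to the EDAC core: single-bit errors as corrected events,
	 * multi-bit errors as uncorrected events.
	 */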
	if (err_detect & DDR_EDE_SBE)
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     pfn, err_addr & ~PAGE_MASK, syndrome,
				     row_index, 0, -1,
				     mci->ctl_name, "");

	if (err_detect & DDR_EDE_MBE)
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     pfn, err_addr & ~PAGE_MASK, syndrome,
				     row_index, 0, -1,
				     mci->ctl_name, "");

	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
}

static irqreturn_t fsl_mc_isr(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	u32 err_detect;

	err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
	if (!err_detect)
		return IRQ_NONE;

	fsl_mc_check(mci);

	return IRQ_HANDLED;
}

static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
{
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	u32 sdram_ctl;
	u32 sdtype;
	enum mem_type mtype;
	u32 cs_bnds;
	int index;

	sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);

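	/*
	 * The SDRAM_CFG SDTYPE field identifies the DDR generation; RD_EN is
	 * set when registered (buffered) DIMMs are in use, hence the
	 * MEM_RDDR* variants in the first switch.
	 */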
	sdtype = sdram_ctl & DSC_SDTYPE_MASK;
	if (sdram_ctl & DSC_RD_EN) {
		switch (sdtype) {
		case 0x02000000:
			mtype = MEM_RDDR;
			break;
		case 0x03000000:
			mtype = MEM_RDDR2;
			break;
		case 0x07000000:
			mtype = MEM_RDDR3;
			break;
		case 0x05000000:
			mtype = MEM_RDDR4;
			break;
		default:
			mtype = MEM_UNKNOWN;
			break;
		}
	} else {
		switch (sdtype) {
		case 0x02000000:
			mtype = MEM_DDR;
			break;
		case 0x03000000:
			mtype = MEM_DDR2;
			break;
		case 0x07000000:
			mtype = MEM_DDR3;
			break;
		case 0x05000000:
			mtype = MEM_DDR4;
			break;
		default:
			mtype = MEM_UNKNOWN;
			break;
		}
	}

	for (index = 0; index < mci->nr_csrows; index++) {
		u32 start;
		u32 end;

		csrow = mci->csrows[index];
		dimm = csrow->channels[0]->dimm;

		cs_bnds = ddr_in32(pdata->mc_vbase + FSL_MC_CS_BNDS_0 +
				   (index * FSL_MC_CS_BNDS_OFS));

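		/*
		 * CS_BNDS holds the chip select's starting and ending
		 * addresses in 16 MiB (2^24 byte) units; convert both to
		 * page numbers.
		 */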
		start = (cs_bnds & 0xffff0000) >> 16;
		end   = (cs_bnds & 0x0000ffff);

		if (start == end)
			continue;	/* not populated */

		start <<= (24 - PAGE_SHIFT);
		end   <<= (24 - PAGE_SHIFT);
		end    |= (1 << (24 - PAGE_SHIFT)) - 1;

		csrow->first_page = start;
		csrow->last_page = end;

		dimm->nr_pages = end + 1 - start;
		dimm->grain = 8;
		dimm->mtype = mtype;
		dimm->dtype = DEV_UNKNOWN;
		if (sdram_ctl & DSC_X32_EN)
			dimm->dtype = DEV_X32;
		dimm->edac_mode = EDAC_SECDED;
	}
}

int fsl_mc_err_probe(struct platform_device *op)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct fsl_mc_pdata *pdata;
	struct resource r;
	u32 sdram_ctl;
	int res;

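	/*
	 * Open a devres group keyed on this function so the devm_*
	 * allocations below can be released together on the error paths
	 * (devres_release_group) or detached from the group once probing
	 * succeeds (devres_remove_group).
	 */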
	if (!devres_open_group(&op->dev, fsl_mc_err_probe, GFP_KERNEL))
		return -ENOMEM;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 4;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 1;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
			    sizeof(*pdata));
	if (!mci) {
		devres_release_group(&op->dev, fsl_mc_err_probe);
		return -ENOMEM;
	}

	pdata = mci->pvt_info;
	pdata->name = "fsl_mc_err";
	mci->pdev = &op->dev;
	pdata->edac_idx = edac_mc_idx++;
	dev_set_drvdata(mci->pdev, mci);
	mci->ctl_name = pdata->name;
	mci->dev_name = pdata->name;

	/*
	 * Get the endianness of DDR controller registers.
	 * Default is big endian.
	 */
	little_endian = of_property_read_bool(op->dev.of_node, "little-endian");

	res = of_address_to_resource(op->dev.of_node, 0, &r);
	if (res) {
		pr_err("%s: Unable to get resource for MC err regs\n",
		       __func__);
		goto err;
	}

	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
				     pdata->name)) {
		pr_err("%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
	if (!pdata->mc_vbase) {
		pr_err("%s: Unable to setup MC err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
	if (!(sdram_ctl & DSC_ECC_EN)) {
		/* no ECC */
		pr_warn("%s: No ECC DIMMs discovered\n", __func__);
		res = -ENODEV;
		goto err;
	}

	edac_dbg(3, "init mci\n");
	mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR |
			 MEM_FLAG_DDR2 | MEM_FLAG_RDDR2 |
			 MEM_FLAG_DDR3 | MEM_FLAG_RDDR3 |
			 MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = EDAC_MOD_STR;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check = fsl_mc_check;

	mci->ctl_page_to_phys = NULL;

	mci->scrub_mode = SCRUB_SW_SRC;

	fsl_ddr_init_csrows(mci);

	/* store the original error disable bits */
	orig_ddr_err_disable = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DISABLE);
	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE, 0);

	/* clear all error bits */
	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, ~0);

	res = edac_mc_add_mc_with_groups(mci, fsl_ddr_dev_groups);
	if (res) {
		edac_dbg(3, "failed edac_mc_add_mc()\n");
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN,
			  DDR_EIE_MBEE | DDR_EIE_SBEE);

		/* store the original error management threshold */
		orig_ddr_err_sbe = ddr_in32(pdata->mc_vbase +
					    FSL_MC_ERR_SBE) & 0xff0000;

		/* set threshold to 1 error per interrupt */
		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, 0x10000);

		/* register interrupts */
		pdata->irq = platform_get_irq(op, 0);
		res = devm_request_irq(&op->dev, pdata->irq,
				       fsl_mc_isr,
				       IRQF_SHARED,
				       "[EDAC] MC err", mci);
		if (res < 0) {
			pr_err("%s: Unable to request irq %d for FSL DDR DRAM ERR\n",
			       __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		pr_info(EDAC_MOD_STR " acquired irq %d for MC\n",
		       pdata->irq);
	}

	devres_remove_group(&op->dev, fsl_mc_err_probe);
	edac_dbg(3, "success\n");
	pr_info(EDAC_MOD_STR " MC err registered\n");

	return 0;

err2:
	edac_mc_del_mc(&op->dev);
err:
	devres_release_group(&op->dev, fsl_mc_err_probe);
	edac_mc_free(mci);
	return res;
}

int fsl_mc_err_remove(struct platform_device *op)
{
	struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;

	edac_dbg(0, "\n");

	if (edac_op_state == EDAC_OPSTATE_INT) {
		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN, 0);
	}

	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE,
		  orig_ddr_err_disable);
	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, orig_ddr_err_sbe);

	edac_mc_del_mc(&op->dev);
	edac_mc_free(mci);
	return 0;
}