edac.h 19.5 KB
Newer Older
D
Dave Jiang 已提交
1 2 3 4 5
/*
 * Generic EDAC defs
 *
 * Author: Dave Jiang <djiang@mvista.com>
 *
6
 * 2006-2008 (c) MontaVista Software, Inc. This file is licensed under
D
Dave Jiang 已提交
7 8 9 10 11 12 13 14
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 *
 */
#ifndef _LINUX_EDAC_H_
#define _LINUX_EDAC_H_

A
Arun Sharma 已提交
15
#include <linux/atomic.h>
16
#include <linux/device.h>
17 18
#include <linux/completion.h>
#include <linux/workqueue.h>
19
#include <linux/debugfs.h>
20
#include <linux/numa.h>
21

22 23
#define EDAC_DEVICE_NAME_LEN	31

24
struct device;
D
Dave Jiang 已提交
25 26 27 28 29 30 31 32

/*
 * Error-reporting operation mode, stored in the global edac_op_state.
 * Any value outside POLL/NMI is normalized to POLL by opstate_init().
 */
#define EDAC_OPSTATE_INVAL	-1	/* not yet configured */
#define EDAC_OPSTATE_POLL	0	/* periodic polling for errors */
#define EDAC_OPSTATE_NMI	1	/* error reporting from NMI context */
#define EDAC_OPSTATE_INT	2	/* error reporting from interrupt context */

extern int edac_op_state;

/* Presumably returns the sysfs bus EDAC devices register on — see edac core. */
struct bus_type *edac_get_sysfs_subsys(void);
34

35 36 37 38 39 40 41 42 43 44 45 46
/*
 * opstate_init - sanitize the global edac_op_state.
 *
 * Only EDAC_OPSTATE_POLL and EDAC_OPSTATE_NMI are kept as-is; every other
 * value (including EDAC_OPSTATE_INVAL and EDAC_OPSTATE_INT) is reset to
 * polling mode, matching the original switch statement's default arm.
 */
static inline void opstate_init(void)
{
	if (edac_op_state != EDAC_OPSTATE_POLL &&
	    edac_op_state != EDAC_OPSTATE_NMI)
		edac_op_state = EDAC_OPSTATE_POLL;
}

47
/* Max length of a DIMM label, excluding the terminating NUL */
#define EDAC_MC_LABEL_LEN	31

/* Maximum size of the location string (see edac_raw_error_desc.location) */
#define LOCATION_SIZE 256

/* Defines the maximum number of labels that can be reported */
#define EDAC_MAX_LABELS		8

/* String used to join two or more labels */
#define OTHER_LABEL " or "

59 60 61 62 63 64 65 66 67 68 69 70 71
/**
 * enum dev_type - describe the type of memory DRAM chips used at the stick
 * @DEV_UNKNOWN:	Can't be determined, or MC doesn't support detect it
 * @DEV_X1:		1 bit for data
 * @DEV_X2:		2 bits for data
 * @DEV_X4:		4 bits for data
 * @DEV_X8:		8 bits for data
 * @DEV_X16:		16 bits for data
 * @DEV_X32:		32 bits for data
 * @DEV_X64:		64 bits for data
 *
 * Typical values are x4 and x8.
 */
enum dev_type {
	DEV_UNKNOWN = 0,
	DEV_X1,
	DEV_X2,
	DEV_X4,
	DEV_X8,
	DEV_X16,
	DEV_X32,		/* Do these parts exist? */
	DEV_X64			/* Do these parts exist? */
};

/* Capability masks: one bit per enum dev_type value */
#define DEV_FLAG_UNKNOWN	BIT(DEV_UNKNOWN)
#define DEV_FLAG_X1		BIT(DEV_X1)
#define DEV_FLAG_X2		BIT(DEV_X2)
#define DEV_FLAG_X4		BIT(DEV_X4)
#define DEV_FLAG_X8		BIT(DEV_X8)
#define DEV_FLAG_X16		BIT(DEV_X16)
#define DEV_FLAG_X32		BIT(DEV_X32)
#define DEV_FLAG_X64		BIT(DEV_X64)

92 93 94 95 96 97 98 99 100 101
/**
 * enum hw_event_mc_err_type - type of the detected error
 *
 * @HW_EVENT_ERR_CORRECTED:	Corrected Error - Indicates that an ECC
 *				corrected error was detected
 * @HW_EVENT_ERR_UNCORRECTED:	Uncorrected Error - Indicates an error that
 *				can't be corrected by ECC, but it is not
 *				fatal (maybe it is on an unused memory area,
 *				or the memory controller could recover from
 *				it for example, by re-trying the operation).
 * @HW_EVENT_ERR_DEFERRED:	Deferred Error - Indicates an uncorrectable
 *				error whose handling is not urgent. This could
 *				be due to hardware data poisoning where the
 *				system can continue operation until the poisoned
 *				data is consumed. Preemptive measures may also
 *				be taken, e.g. offlining pages, etc.
 * @HW_EVENT_ERR_FATAL:		Fatal Error - Uncorrected error that could not
 *				be recovered.
 * @HW_EVENT_ERR_INFO:		Informational - The CPER spec defines a fourth
 *				type of error: informational logs.
 */
enum hw_event_mc_err_type {
	HW_EVENT_ERR_CORRECTED,
	HW_EVENT_ERR_UNCORRECTED,
	HW_EVENT_ERR_DEFERRED,
	HW_EVENT_ERR_FATAL,
	HW_EVENT_ERR_INFO,
};

/*
 * mc_event_error_type - map an error type to its reporting string.
 * Unrecognized values fall through to "Info", same as HW_EVENT_ERR_INFO.
 */
static inline char *mc_event_error_type(const unsigned int err_type)
{
	if (err_type == HW_EVENT_ERR_CORRECTED)
		return "Corrected";
	if (err_type == HW_EVENT_ERR_UNCORRECTED)
		return "Uncorrected";
	if (err_type == HW_EVENT_ERR_DEFERRED)
		return "Deferred";
	if (err_type == HW_EVENT_ERR_FATAL)
		return "Fatal";

	/* HW_EVENT_ERR_INFO and anything unrecognized */
	return "Info";
}

138 139 140 141
/**
 * enum mem_type - memory types. For a more detailed reference, please see
 *			http://en.wikipedia.org/wiki/DRAM
 *
 * @MEM_EMPTY:		Empty csrow
 * @MEM_RESERVED:	Reserved csrow type
 * @MEM_UNKNOWN:	Unknown csrow type
 * @MEM_FPM:		FPM - Fast Page Mode, used on systems up to 1995.
 * @MEM_EDO:		EDO - Extended data out, used on systems up to 1998.
 * @MEM_BEDO:		BEDO - Burst Extended data out, an EDO variant.
 * @MEM_SDR:		SDR - Single data rate SDRAM
 *			http://en.wikipedia.org/wiki/Synchronous_dynamic_random-access_memory
 *			They use 3 pins for chip select: Pins 0 and 2 are
 *			for rank 0; pins 1 and 3 are for rank 1, if the memory
 *			is dual-rank.
 * @MEM_RDR:		Registered SDR SDRAM
 * @MEM_DDR:		Double data rate SDRAM
 *			http://en.wikipedia.org/wiki/DDR_SDRAM
 * @MEM_RDDR:		Registered Double data rate SDRAM
 *			This is a variant of the DDR memories.
 *			A registered memory has a buffer inside it, hiding
 *			part of the memory details to the memory controller.
 * @MEM_RMBS:		Rambus DRAM, used on a few Pentium III/IV controllers.
 * @MEM_DDR2:		DDR2 RAM, as described at JEDEC JESD79-2F.
 *			Those memories are labeled as "PC2-" instead of "PC" to
 *			differentiate from DDR.
 * @MEM_FB_DDR2:	Fully-Buffered DDR2, as described at JEDEC Std No. 205
 *			and JESD206.
 *			Those memories are accessed per DIMM slot, and not by
 *			a chip select signal.
 * @MEM_RDDR2:		Registered DDR2 RAM
 *			This is a variant of the DDR2 memories.
 * @MEM_XDR:		Rambus XDR
 *			It is an evolution of the original RAMBUS memories,
 *			created to compete with DDR2. Weren't used on any
 *			x86 arch, but cell_edac PPC memory controller uses it.
 * @MEM_DDR3:		DDR3 RAM
 * @MEM_RDDR3:		Registered DDR3 RAM
 *			This is a variant of the DDR3 memories.
 * @MEM_LRDDR3:		Load-Reduced DDR3 memory.
 * @MEM_LPDDR3:		Low-Power DDR3 memory.
 * @MEM_DDR4:		Unbuffered DDR4 RAM
 * @MEM_RDDR4:		Registered DDR4 RAM
 *			This is a variant of the DDR4 memories.
 * @MEM_LRDDR4:		Load-Reduced DDR4 memory.
 * @MEM_LPDDR4:		Low-Power DDR4 memory.
 * @MEM_DDR5:		Unbuffered DDR5 RAM
 * @MEM_RDDR5:		Registered DDR5 RAM
 * @MEM_LRDDR5:		Load-Reduced DDR5 memory.
 * @MEM_NVDIMM:		Non-volatile RAM
 * @MEM_WIO2:		Wide I/O 2.
 * @MEM_HBM2:		High bandwidth Memory Gen 2.
 */
enum mem_type {
	MEM_EMPTY = 0,
	MEM_RESERVED,
	MEM_UNKNOWN,
	MEM_FPM,
	MEM_EDO,
	MEM_BEDO,
	MEM_SDR,
	MEM_RDR,
	MEM_DDR,
	MEM_RDDR,
	MEM_RMBS,
	MEM_DDR2,
	MEM_FB_DDR2,
	MEM_RDDR2,
	MEM_XDR,
	MEM_DDR3,
	MEM_RDDR3,
	MEM_LRDDR3,
	MEM_LPDDR3,
	MEM_DDR4,
	MEM_RDDR4,
	MEM_LRDDR4,
	MEM_LPDDR4,
	MEM_DDR5,
	MEM_RDDR5,
	MEM_LRDDR5,
	MEM_NVDIMM,
	MEM_WIO2,
	MEM_HBM2,
};

/* Capability masks: one bit per enum mem_type value */
#define MEM_FLAG_EMPTY		BIT(MEM_EMPTY)
#define MEM_FLAG_RESERVED	BIT(MEM_RESERVED)
#define MEM_FLAG_UNKNOWN	BIT(MEM_UNKNOWN)
#define MEM_FLAG_FPM		BIT(MEM_FPM)
#define MEM_FLAG_EDO		BIT(MEM_EDO)
#define MEM_FLAG_BEDO		BIT(MEM_BEDO)
#define MEM_FLAG_SDR		BIT(MEM_SDR)
#define MEM_FLAG_RDR		BIT(MEM_RDR)
#define MEM_FLAG_DDR		BIT(MEM_DDR)
#define MEM_FLAG_RDDR		BIT(MEM_RDDR)
#define MEM_FLAG_RMBS		BIT(MEM_RMBS)
#define MEM_FLAG_DDR2           BIT(MEM_DDR2)
#define MEM_FLAG_FB_DDR2        BIT(MEM_FB_DDR2)
#define MEM_FLAG_RDDR2          BIT(MEM_RDDR2)
#define MEM_FLAG_XDR            BIT(MEM_XDR)
#define MEM_FLAG_DDR3           BIT(MEM_DDR3)
#define MEM_FLAG_RDDR3          BIT(MEM_RDDR3)
/* Fix: MEM_FLAG_LRDDR3 was missing although MEM_LRDDR3 exists above and
 * LRDDR4/LRDDR5 both have flags; added for consistency. */
#define MEM_FLAG_LRDDR3         BIT(MEM_LRDDR3)
#define MEM_FLAG_LPDDR3         BIT(MEM_LPDDR3)
#define MEM_FLAG_DDR4           BIT(MEM_DDR4)
#define MEM_FLAG_RDDR4          BIT(MEM_RDDR4)
#define MEM_FLAG_LRDDR4         BIT(MEM_LRDDR4)
#define MEM_FLAG_LPDDR4         BIT(MEM_LPDDR4)
#define MEM_FLAG_DDR5           BIT(MEM_DDR5)
#define MEM_FLAG_RDDR5          BIT(MEM_RDDR5)
#define MEM_FLAG_LRDDR5         BIT(MEM_LRDDR5)
#define MEM_FLAG_NVDIMM         BIT(MEM_NVDIMM)
#define MEM_FLAG_WIO2		BIT(MEM_WIO2)
#define MEM_FLAG_HBM2		BIT(MEM_HBM2)
251

252
/**
 * enum edac_type - Error Detection and Correction capabilities and mode
 * @EDAC_UNKNOWN:	Unknown if ECC is available
 * @EDAC_NONE:		Doesn't support ECC
 * @EDAC_RESERVED:	Reserved ECC type
 * @EDAC_PARITY:	Detects parity errors
 * @EDAC_EC:		Error Checking - no correction
 * @EDAC_SECDED:	Single bit error correction, Double detection
 * @EDAC_S2ECD2ED:	Chipkill x2 devices - do these exist?
 * @EDAC_S4ECD4ED:	Chipkill x4 devices
 * @EDAC_S8ECD8ED:	Chipkill x8 devices
 * @EDAC_S16ECD16ED:	Chipkill x16 devices
 */
enum edac_type {
	EDAC_UNKNOWN =	0,
	EDAC_NONE,
	EDAC_RESERVED,
	EDAC_PARITY,
	EDAC_EC,
	EDAC_SECDED,
	EDAC_S2ECD2ED,
	EDAC_S4ECD4ED,
	EDAC_S8ECD8ED,
	EDAC_S16ECD16ED,
};

/* Capability masks for enum edac_type (note: no flag for EDAC_RESERVED) */
#define EDAC_FLAG_UNKNOWN	BIT(EDAC_UNKNOWN)
#define EDAC_FLAG_NONE		BIT(EDAC_NONE)
#define EDAC_FLAG_PARITY	BIT(EDAC_PARITY)
#define EDAC_FLAG_EC		BIT(EDAC_EC)
#define EDAC_FLAG_SECDED	BIT(EDAC_SECDED)
#define EDAC_FLAG_S2ECD2ED	BIT(EDAC_S2ECD2ED)
#define EDAC_FLAG_S4ECD4ED	BIT(EDAC_S4ECD4ED)
#define EDAC_FLAG_S8ECD8ED	BIT(EDAC_S8ECD8ED)
#define EDAC_FLAG_S16ECD16ED	BIT(EDAC_S16ECD16ED)

288 289
/**
 * enum scrub_type - scrubbing capabilities
 * @SCRUB_UNKNOWN:		Unknown if scrubber is available
 * @SCRUB_NONE:			No scrubber
 * @SCRUB_SW_PROG:		SW progressive (sequential) scrubbing
 * @SCRUB_SW_SRC:		Software scrub only errors
 * @SCRUB_SW_PROG_SRC:		Progressive software scrub from an error
 * @SCRUB_SW_TUNABLE:		Software scrub frequency is tunable
 * @SCRUB_HW_PROG:		HW progressive (sequential) scrubbing
 * @SCRUB_HW_SRC:		Hardware scrub only errors
 * @SCRUB_HW_PROG_SRC:		Progressive hardware scrub from an error
 * @SCRUB_HW_TUNABLE:		Hardware scrub frequency is tunable
 */
enum scrub_type {
	SCRUB_UNKNOWN =	0,
	SCRUB_NONE,
	SCRUB_SW_PROG,
	SCRUB_SW_SRC,
	SCRUB_SW_PROG_SRC,
	SCRUB_SW_TUNABLE,
	SCRUB_HW_PROG,
	SCRUB_HW_SRC,
	SCRUB_HW_PROG_SRC,
	SCRUB_HW_TUNABLE
};

/* Capability masks for enum scrub_type */
#define SCRUB_FLAG_SW_PROG	BIT(SCRUB_SW_PROG)
#define SCRUB_FLAG_SW_SRC	BIT(SCRUB_SW_SRC)
#define SCRUB_FLAG_SW_PROG_SRC	BIT(SCRUB_SW_PROG_SRC)
/* Fix: previously expanded to BIT(SCRUB_SW_SCRUB_TUNABLE), which names no
 * enumerator in enum scrub_type and would fail to compile if ever used. */
#define SCRUB_FLAG_SW_TUN	BIT(SCRUB_SW_TUNABLE)
#define SCRUB_FLAG_HW_PROG	BIT(SCRUB_HW_PROG)
#define SCRUB_FLAG_HW_SRC	BIT(SCRUB_HW_SRC)
#define SCRUB_FLAG_HW_PROG_SRC	BIT(SCRUB_HW_PROG_SRC)
#define SCRUB_FLAG_HW_TUN	BIT(SCRUB_HW_TUNABLE)

/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */

/* EDAC internal operation states (stored in mem_ctl_info.op_state) */
#define	OP_ALLOC		0x100
#define OP_RUNNING_POLL		0x201
#define OP_RUNNING_INTERRUPT	0x202
#define OP_RUNNING_POLL_INTR	0x203
#define OP_OFFLINE		0x300

332
/**
 * enum edac_mc_layer_type - memory controller hierarchy layer
 *
 * @EDAC_MC_LAYER_BRANCH:	memory layer is named "branch"
 * @EDAC_MC_LAYER_CHANNEL:	memory layer is named "channel"
 * @EDAC_MC_LAYER_SLOT:		memory layer is named "slot"
 * @EDAC_MC_LAYER_CHIP_SELECT:	memory layer is named "chip select"
 * @EDAC_MC_LAYER_ALL_MEM:	memory layout is unknown. All memory is mapped
 *				as a single memory area. This is used when
 *				retrieving errors from a firmware driven driver.
 *
 * This enum is used by the drivers to tell edac_mc_sysfs what name should
 * be used when describing a memory stick location.
 */
enum edac_mc_layer_type {
	EDAC_MC_LAYER_BRANCH,
	EDAC_MC_LAYER_CHANNEL,
	EDAC_MC_LAYER_SLOT,
	EDAC_MC_LAYER_CHIP_SELECT,
	EDAC_MC_LAYER_ALL_MEM,
};

/**
 * struct edac_mc_layer - describes the memory controller hierarchy
 * @type:		layer type
 * @size:		number of components per layer. For example,
 *			if the channel layer has two channels, size = 2
 * @is_virt_csrow:	This layer is part of the "csrow" when old API
 *			compatibility mode is enabled. Otherwise, it is
 *			a channel
 */
struct edac_mc_layer {
	enum edac_mc_layer_type	type;
	unsigned		size;
	bool			is_virt_csrow;
};

/*
 * Maximum number of layers used by the memory controller to uniquely
 * identify a single memory stick.
 * NOTE: Changing this constant requires not only to change the constant
 * below, but also to change the existing code at the core, as there are
 * some code there that are optimized for 3 layers.
 */
#define EDAC_MAX_LAYERS		3

378
/* Per-memory-stick (DIMM) information tracked by a memory controller. */
struct dimm_info {
	struct device dev;

	char label[EDAC_MC_LABEL_LEN + 1];	/* DIMM label on motherboard */

	/* Memory location data */
	unsigned int location[EDAC_MAX_LAYERS];

	struct mem_ctl_info *mci;	/* the parent */
	unsigned int idx;		/* index within the parent dimm array */

	u32 grain;		/* granularity of reported error in bytes */
	enum dev_type dtype;	/* memory device type */
	enum mem_type mtype;	/* memory dimm type */
	enum edac_type edac_mode;	/* EDAC mode for this dimm */

	u32 nr_pages;			/* number of pages on this dimm */

	unsigned int csrow, cschannel;	/* Points to the old API data */

	u16 smbios_handle;              /* Handle for SMBIOS type 17 */

	u32 ce_count;	/* Correctable Errors for this dimm */
	u32 ue_count;	/* Uncorrectable Errors for this dimm */
};

404 405 406 407 408 409 410 411
/**
 * struct rank_info - contains the information for one DIMM rank
 *
 * @chan_idx:	channel number where the rank is (typically, 0 or 1)
 * @ce_count:	number of correctable errors for this rank
 * @csrow:	A pointer to the chip select row structure (the parent
 *		structure). The location of the rank is given by
 *		the (csrow->csrow_idx, chan_idx) vector.
 * @dimm:	A pointer to the DIMM structure, where the DIMM label
 *		information is stored.
 *
 * FIXME: Currently, the EDAC core model will assume one DIMM per rank.
 *	  This is a bad assumption, but it makes this patch easier. Later
 *	  patches in this series will fix this issue.
 */
struct rank_info {
	int chan_idx;
	struct csrow_info *csrow;
	struct dimm_info *dimm;

	u32 ce_count;		/* Correctable Errors for this csrow */
};

/* One chip-select row and the channels (ranks) belonging to it. */
struct csrow_info {
	struct device dev;

	/* Used only by edac_mc_find_csrow_by_page() */
	unsigned long first_page;	/* first page number in csrow */
	unsigned long last_page;	/* last page number in csrow */
	unsigned long page_mask;	/* used for interleaving -
					 * 0UL for non intlv */

	int csrow_idx;			/* the chip-select row */

	u32 ue_count;		/* Uncorrectable Errors for this csrow */
	u32 ce_count;		/* Correctable Errors for this csrow */

	struct mem_ctl_info *mci;	/* the parent */

	/* channel information for this csrow */
	u32 nr_channels;
	struct rank_info **channels;	/* array of nr_channels rank pointers */
};

448 449 450 451 452 453 454
/*
 * struct errcount_attribute_data - used to store the several error counts
 */
struct errcount_attribute_data {
	int n_layers;			/* number of layers this counter covers */
	int pos[EDAC_MAX_LAYERS];	/* per-layer position — presumably mirrors layer0..2; verify in edac_mc_sysfs */
	int layer0, layer1, layer2;
};

457
/**
 * struct edac_raw_error_desc - Raw error report structure
 * @grain:			minimum granularity for an error report, in bytes
 * @error_count:		number of errors of the same type
 * @type:			severity of the error (CE/UE/Fatal)
 * @top_layer:			top layer of the error (layer[0])
 * @mid_layer:			middle layer of the error (layer[1])
 * @low_layer:			low layer of the error (layer[2])
 * @page_frame_number:		page where the error happened
 * @offset_in_page:		page offset
 * @syndrome:			syndrome of the error (or 0 if unknown or if
 * 				the syndrome is not applicable)
 * @msg:			error message
 * @location:			location of the error
 * @label:			label of the affected DIMM(s)
 * @other_detail:		other driver-specific detail about the error
 */
struct edac_raw_error_desc {
	char location[LOCATION_SIZE];
	/* Sized so every possible label fits, joined by OTHER_LABEL */
	char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * EDAC_MAX_LABELS];
	long grain;

	u16 error_count;
	enum hw_event_mc_err_type type;
	int top_layer;
	int mid_layer;
	int low_layer;
	unsigned long page_frame_number;
	unsigned long offset_in_page;
	unsigned long syndrome;
	const char *msg;
	const char *other_detail;
};

491 492 493
/* MEMORY controller information structure
 */
struct mem_ctl_info {
	struct device			dev;
	struct bus_type			*bus;

	struct list_head link;	/* for global list of mem_ctl_info structs */

	struct module *owner;	/* Module owner of this control struct */

	unsigned long mtype_cap;	/* memory types supported by mc */
	unsigned long edac_ctl_cap;	/* Mem controller EDAC capabilities */
	unsigned long edac_cap;	/* configuration capabilities - this is
				 * closely related to edac_ctl_cap.  The
				 * difference is that the controller may be
				 * capable of s4ecd4ed which would be listed
				 * in edac_ctl_cap, but if channels aren't
				 * capable of s4ecd4ed then the edac_cap would
				 * not have that capability.
				 */
	unsigned long scrub_cap;	/* chipset scrub capabilities */
	enum scrub_type scrub_mode;	/* current scrub mode */

	/* Translates sdram memory scrub rate given in bytes/sec to the
	   internal representation and configures whatever else needs
	   to be configured.
	 */
	int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 bw);

	/* Get the current sdram memory scrub rate from the internal
	   representation and converts it to the closest matching
	   bandwidth in bytes/sec.
	 */
	int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci);

	/* pointer to edac checking routine */
	void (*edac_check) (struct mem_ctl_info * mci);

	/*
	 * Remaps memory pages: controller pages to physical pages.
	 * For most MC's, this will be NULL.
	 */
	/* FIXME - why not send the phys page to begin with? */
	unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
					   unsigned long page);
	int mc_idx;	/* controller index; see uniqueness FIXME below */
	struct csrow_info **csrows;
	unsigned int nr_csrows, num_cschannel;

	/*
	 * Memory Controller hierarchy
	 *
	 * There are basically two types of memory controller: the ones that
	 * sees memory sticks ("dimms"), and the ones that sees memory ranks.
	 * All old memory controllers enumerate memories per rank, but most
	 * of the recent drivers enumerate memories per DIMM, instead.
	 * When the memory controller is per rank, csbased is true.
	 */
	unsigned int n_layers;
	struct edac_mc_layer *layers;
	bool csbased;

	/*
	 * DIMM info. Will eventually remove the entire csrows_info some day
	 */
	unsigned int tot_dimms;
	struct dimm_info **dimms;

	/*
	 * FIXME - what about controllers on other busses? - IDs must be
	 * unique.  dev pointer should be sufficiently unique, but
	 * BUS:SLOT.FUNC numbers may not be unique.
	 */
	struct device *pdev;
	const char *mod_name;
	const char *ctl_name;
	const char *dev_name;
	void *pvt_info;	/* low-level-driver private data */
	unsigned long start_time;	/* mci load start time (in jiffies) */

	/*
	 * drivers shouldn't access those fields directly, as the core
	 * already handles that.
	 */
	u32 ce_noinfo_count, ue_noinfo_count;
	u32 ue_mc, ce_mc;

	struct completion complete;

	/* Additional top controller level attributes, but specified
	 * by the low level driver.
	 *
	 * Set by the low level driver to provide attributes at the
	 * controller level.
	 * An array of structures, NULL terminated
	 *
	 * If attributes are desired, then set to array of attributes
	 * If no attributes are desired, leave NULL
	 */
	const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes;

	/* work struct for this MC */
	struct delayed_work work;

	/*
	 * Used to report an error - by being at the global struct
	 * makes the memory allocated by the EDAC core
	 */
	struct edac_raw_error_desc error_desc;

	/* the internal state of this controller instance */
	int op_state;

	/* debugfs entry and fake-injection knobs — presumably test-only; verify against edac debug code */
	struct dentry *debugfs;
	u8 fake_inject_layer[EDAC_MAX_LAYERS];
	bool fake_inject_ue;
	u16 fake_inject_count;
};
610

611 612 613 614 615 616 617
/*
 * mci_for_each_dimm - iterate @dimm over every entry of (mci)->dimms,
 * in index order (0 .. tot_dimms-1). @dimm is NULL after the loop ends.
 */
#define mci_for_each_dimm(mci, dimm)				\
	for ((dimm) = (mci)->dimms[0];				\
	     (dimm);						\
	     (dimm) = (dimm)->idx + 1 < (mci)->tot_dimms	\
		     ? (mci)->dimms[(dimm)->idx + 1]		\
		     : NULL)

618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652
/**
 * edac_get_dimm - Get DIMM info from a memory controller given by
 *                 [layer0,layer1,layer2] position
 *
 * @mci:	MC descriptor struct mem_ctl_info
 * @layer0:	layer0 position
 * @layer1:	layer1 position. Unused if n_layers < 2
 * @layer2:	layer2 position. Unused if n_layers < 3
 *
 * The layer coordinates are flattened row-major into the @mci->dimms
 * array: with 1 layer the result is dimms[layer0]; with 2 layers it is
 * the equivalent of dimms[layer0][layer1]; with 3 layers the equivalent
 * of dimms[layer0][layer1][layer2].
 *
 * Returns NULL if any used layer index is negative or the flattened
 * index falls outside the dimm table.
 */
static inline struct dimm_info *edac_get_dimm(struct mem_ctl_info *mci,
	int layer0, int layer1, int layer2)
{
	int index = layer0;

	if (layer0 < 0)
		return NULL;

	if (mci->n_layers > 1) {
		if (layer1 < 0)
			return NULL;
		index = index * mci->layers[1].size + layer1;
	}

	if (mci->n_layers > 2) {
		if (layer2 < 0)
			return NULL;
		index = index * mci->layers[2].size + layer2;
	}

	if (index < 0 || index >= mci->tot_dimms)
		return NULL;

	/* Each dimm records its own flattened index; mismatch means corruption. */
	if (WARN_ON_ONCE(mci->dimms[index]->idx != index))
		return NULL;

	return mci->dimms[index];
}
#endif /* _LINUX_EDAC_H_ */