/*
 * Generic EDAC defs
 *
 * Author: Dave Jiang <djiang@mvista.com>
 *
 * 2006-2008 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 *
 */
#ifndef _LINUX_EDAC_H_
#define _LINUX_EDAC_H_

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>

struct device;

#define EDAC_OPSTATE_INVAL	-1
#define EDAC_OPSTATE_POLL	0
#define EDAC_OPSTATE_NMI	1
#define EDAC_OPSTATE_INT	2

extern int edac_op_state;
extern int edac_err_assert;
extern atomic_t edac_handlers;
extern struct bus_type edac_subsys;

extern int edac_handler_set(void);
extern void edac_atomic_assert_error(void);
extern struct bus_type *edac_get_sysfs_subsys(void);
extern void edac_put_sysfs_subsys(void);

static inline void opstate_init(void)
{
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_NMI:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_POLL;
	}
	return;
}
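
/*
 * Example (illustrative sketch, not a definitive recipe): a driver normally
 * exposes edac_op_state as a module parameter and normalizes it with
 * opstate_init() before registering its memory controller.  The my_edac_*
 * names below are hypothetical.
 *
 *	module_param(edac_op_state, int, 0444);
 *	MODULE_PARM_DESC(edac_op_state, "EDAC error reporting state: 0=Poll, 2=Interrupt");
 *
 *	static int __init my_edac_init(void)
 *	{
 *		opstate_init();
 *		return pci_register_driver(&my_edac_driver);
 *	}
 */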

/* Max length of a DIMM label */
#define EDAC_MC_LABEL_LEN	31

/* Maximum size of the location string */
#define LOCATION_SIZE 80

/* Defines the maximum number of labels that can be reported */
#define EDAC_MAX_LABELS		8

/* String used to join two or more labels */
#define OTHER_LABEL " or "
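
/*
 * Example (illustrative sketch): when an error cannot be attributed to a
 * single DIMM, the reported label is composed by joining the candidate
 * labels with OTHER_LABEL, yielding strings like "DIMM_A1 or DIMM_A2".
 * The dimm1/dimm2 pointers below are hypothetical:
 *
 *	char msg[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * EDAC_MAX_LABELS];
 *	char *p = msg;
 *
 *	p += sprintf(p, "%s", dimm1->label);
 *	p += sprintf(p, "%s%s", OTHER_LABEL, dimm2->label);
 */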

/**
 * enum dev_type - describes the type of DRAM chips used on the memory stick
 * @DEV_UNKNOWN:	Can't be determined, or the MC doesn't support detecting it
 * @DEV_X1:		1 bit for data
 * @DEV_X2:		2 bits for data
 * @DEV_X4:		4 bits for data
 * @DEV_X8:		8 bits for data
 * @DEV_X16:		16 bits for data
 * @DEV_X32:		32 bits for data
 * @DEV_X64:		64 bits for data
 *
 * Typical values are x4 and x8.
 */
enum dev_type {
	DEV_UNKNOWN = 0,
	DEV_X1,
	DEV_X2,
	DEV_X4,
	DEV_X8,
	DEV_X16,
	DEV_X32,		/* Do these parts exist? */
	DEV_X64			/* Do these parts exist? */
};

#define DEV_FLAG_UNKNOWN	BIT(DEV_UNKNOWN)
#define DEV_FLAG_X1		BIT(DEV_X1)
#define DEV_FLAG_X2		BIT(DEV_X2)
#define DEV_FLAG_X4		BIT(DEV_X4)
#define DEV_FLAG_X8		BIT(DEV_X8)
#define DEV_FLAG_X16		BIT(DEV_X16)
#define DEV_FLAG_X32		BIT(DEV_X32)
#define DEV_FLAG_X64		BIT(DEV_X64)
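
/*
 * Example (illustrative sketch): drivers usually derive the dev_type from a
 * controller register or from SPD data.  A hypothetical helper mapping the
 * DRAM device data width, in bits, to the enum:
 *
 *	static enum dev_type width_to_dev_type(unsigned int width)
 *	{
 *		switch (width) {
 *		case 4:
 *			return DEV_X4;
 *		case 8:
 *			return DEV_X8;
 *		case 16:
 *			return DEV_X16;
 *		default:
 *			return DEV_UNKNOWN;
 *		}
 *	}
 */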

/**
 * enum hw_event_mc_err_type - type of the detected error
 *
 * @HW_EVENT_ERR_CORRECTED:	Corrected Error - Indicates that an ECC
 *				corrected error was detected
 * @HW_EVENT_ERR_UNCORRECTED:	Uncorrected Error - Indicates an error that
 *				can't be corrected by ECC, but is not
 *				fatal (maybe it is in an unused memory area,
 *				or the memory controller could recover from
 *				it, for example by retrying the operation).
 * @HW_EVENT_ERR_FATAL:		Fatal Error - Uncorrected error that could not
 *				be recovered.
 * @HW_EVENT_ERR_INFO:		Informational event, reported as "Info" by
 *				mc_event_error_type().
 */
enum hw_event_mc_err_type {
	HW_EVENT_ERR_CORRECTED,
	HW_EVENT_ERR_UNCORRECTED,
	HW_EVENT_ERR_FATAL,
	HW_EVENT_ERR_INFO,
};

static inline char *mc_event_error_type(const unsigned int err_type)
{
	switch (err_type) {
	case HW_EVENT_ERR_CORRECTED:
		return "Corrected";
	case HW_EVENT_ERR_UNCORRECTED:
		return "Uncorrected";
	case HW_EVENT_ERR_FATAL:
		return "Fatal";
	default:
	case HW_EVENT_ERR_INFO:
		return "Info";
	}
}
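
/*
 * Example (illustrative sketch): mc_event_error_type() is meant for turning
 * the event type into a printable string, e.g. when logging a decoded error
 * (the mci/dimm/type variables are assumed to exist in the caller):
 *
 *	pr_info("EDAC MC%d: %s error on %s\n",
 *		mci->mc_idx, mc_event_error_type(type), dimm->label);
 */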

/**
 * enum mem_type - memory types. For a more detailed reference, please see
 *			http://en.wikipedia.org/wiki/DRAM
 *
 * @MEM_EMPTY:		Empty csrow
 * @MEM_RESERVED:	Reserved csrow type
 * @MEM_UNKNOWN:	Unknown csrow type
 * @MEM_FPM:		FPM - Fast Page Mode, used on systems up to 1995.
 * @MEM_EDO:		EDO - Extended data out, used on systems up to 1998.
 * @MEM_BEDO:		BEDO - Burst Extended data out, an EDO variant.
 * @MEM_SDR:		SDR - Single data rate SDRAM
 *			http://en.wikipedia.org/wiki/Synchronous_dynamic_random-access_memory
 *			Chip select uses dedicated pins: pins 0 and 2 are
 *			for rank 0; pins 1 and 3 are for rank 1, if the memory
 *			is dual-rank.
 * @MEM_RDR:		Registered SDR SDRAM
 * @MEM_DDR:		Double data rate SDRAM
 *			http://en.wikipedia.org/wiki/DDR_SDRAM
 * @MEM_RDDR:		Registered Double data rate SDRAM
 *			This is a variant of the DDR memories.
 *			A registered memory has a buffer inside it, hiding
 *			part of the memory details from the memory controller.
 * @MEM_RMBS:		Rambus DRAM, used on a few Pentium III/IV controllers.
 * @MEM_DDR2:		DDR2 RAM, as described at JEDEC JESD79-2F.
 *			Those memories are labeled as "PC2-" instead of "PC" to
 *			differentiate them from DDR.
 * @MEM_FB_DDR2:	Fully-Buffered DDR2, as described at JEDEC Std No. 205
 *			and JESD206.
 *			Those memories are accessed per DIMM slot, and not by
 *			a chip select signal.
 * @MEM_RDDR2:		Registered DDR2 RAM
 *			This is a variant of the DDR2 memories.
 * @MEM_XDR:		Rambus XDR
 *			It is an evolution of the original RAMBUS memories,
 *			created to compete with DDR2. It wasn't used on any
 *			x86 arch, but the cell_edac PPC memory controller uses it.
 * @MEM_DDR3:		DDR3 RAM
 * @MEM_RDDR3:		Registered DDR3 RAM
 *			This is a variant of the DDR3 memories.
 */
enum mem_type {
	MEM_EMPTY = 0,
	MEM_RESERVED,
	MEM_UNKNOWN,
	MEM_FPM,
	MEM_EDO,
	MEM_BEDO,
	MEM_SDR,
	MEM_RDR,
	MEM_DDR,
	MEM_RDDR,
	MEM_RMBS,
	MEM_DDR2,
	MEM_FB_DDR2,
	MEM_RDDR2,
	MEM_XDR,
	MEM_DDR3,
	MEM_RDDR3,
};

#define MEM_FLAG_EMPTY		BIT(MEM_EMPTY)
#define MEM_FLAG_RESERVED	BIT(MEM_RESERVED)
#define MEM_FLAG_UNKNOWN	BIT(MEM_UNKNOWN)
#define MEM_FLAG_FPM		BIT(MEM_FPM)
#define MEM_FLAG_EDO		BIT(MEM_EDO)
#define MEM_FLAG_BEDO		BIT(MEM_BEDO)
#define MEM_FLAG_SDR		BIT(MEM_SDR)
#define MEM_FLAG_RDR		BIT(MEM_RDR)
#define MEM_FLAG_DDR		BIT(MEM_DDR)
#define MEM_FLAG_RDDR		BIT(MEM_RDDR)
#define MEM_FLAG_RMBS		BIT(MEM_RMBS)
#define MEM_FLAG_DDR2           BIT(MEM_DDR2)
#define MEM_FLAG_FB_DDR2        BIT(MEM_FB_DDR2)
#define MEM_FLAG_RDDR2          BIT(MEM_RDDR2)
#define MEM_FLAG_XDR            BIT(MEM_XDR)
#define MEM_FLAG_DDR3		 BIT(MEM_DDR3)
#define MEM_FLAG_RDDR3		 BIT(MEM_RDDR3)
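
/*
 * Example (illustrative sketch): drivers OR the MEM_FLAG_* bits into
 * mci->mtype_cap to advertise which memory types the controller supports,
 * e.g. for a controller that handles unbuffered and registered DDR3:
 *
 *	mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_RDDR3;
 */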

/**
 * enum edac_type - Error Detection and Correction capabilities and mode
 * @EDAC_UNKNOWN:	Unknown if ECC is available
 * @EDAC_NONE:		Doesn't support ECC
 * @EDAC_RESERVED:	Reserved ECC type
 * @EDAC_PARITY:	Detects parity errors
 * @EDAC_EC:		Error Checking - no correction
 * @EDAC_SECDED:	Single bit error correction, Double detection
 * @EDAC_S2ECD2ED:	Chipkill x2 devices - do these exist?
 * @EDAC_S4ECD4ED:	Chipkill x4 devices
 * @EDAC_S8ECD8ED:	Chipkill x8 devices
 * @EDAC_S16ECD16ED:	Chipkill x16 devices
 */
enum edac_type {
	EDAC_UNKNOWN =	0,
	EDAC_NONE,
	EDAC_RESERVED,
	EDAC_PARITY,
	EDAC_EC,
	EDAC_SECDED,
	EDAC_S2ECD2ED,
	EDAC_S4ECD4ED,
	EDAC_S8ECD8ED,
	EDAC_S16ECD16ED,
};

#define EDAC_FLAG_UNKNOWN	BIT(EDAC_UNKNOWN)
#define EDAC_FLAG_NONE		BIT(EDAC_NONE)
#define EDAC_FLAG_PARITY	BIT(EDAC_PARITY)
#define EDAC_FLAG_EC		BIT(EDAC_EC)
#define EDAC_FLAG_SECDED	BIT(EDAC_SECDED)
#define EDAC_FLAG_S2ECD2ED	BIT(EDAC_S2ECD2ED)
#define EDAC_FLAG_S4ECD4ED	BIT(EDAC_S4ECD4ED)
#define EDAC_FLAG_S8ECD8ED	BIT(EDAC_S8ECD8ED)
#define EDAC_FLAG_S16ECD16ED	BIT(EDAC_S16ECD16ED)
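
/*
 * Example (illustrative sketch): edac_ctl_cap carries what the controller
 * could do, while edac_cap is narrowed to what the current configuration
 * actually allows (see the field comments in struct mem_ctl_info below).
 * The ecc_enabled flag is a hypothetical driver variable:
 *
 *	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED;
 *	mci->edac_cap = ecc_enabled ? EDAC_FLAG_SECDED : EDAC_FLAG_NONE;
 */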

/**
 * enum scrub_type - scrubbing capabilities
 * @SCRUB_UNKNOWN:		Unknown if scrubber is available
 * @SCRUB_NONE:			No scrubber
 * @SCRUB_SW_PROG:		SW progressive (sequential) scrubbing
 * @SCRUB_SW_SRC:		Software scrub only errors
 * @SCRUB_SW_PROG_SRC:		Progressive software scrub from an error
 * @SCRUB_SW_TUNABLE:		Software scrub frequency is tunable
 * @SCRUB_HW_PROG:		HW progressive (sequential) scrubbing
 * @SCRUB_HW_SRC:		Hardware scrub only errors
 * @SCRUB_HW_PROG_SRC:		Progressive hardware scrub from an error
 * @SCRUB_HW_TUNABLE:		Hardware scrub frequency is tunable
 */
enum scrub_type {
	SCRUB_UNKNOWN =	0,
	SCRUB_NONE,
	SCRUB_SW_PROG,
	SCRUB_SW_SRC,
	SCRUB_SW_PROG_SRC,
	SCRUB_SW_TUNABLE,
	SCRUB_HW_PROG,
	SCRUB_HW_SRC,
	SCRUB_HW_PROG_SRC,
	SCRUB_HW_TUNABLE
};

#define SCRUB_FLAG_SW_PROG	BIT(SCRUB_SW_PROG)
#define SCRUB_FLAG_SW_SRC	BIT(SCRUB_SW_SRC)
#define SCRUB_FLAG_SW_PROG_SRC	BIT(SCRUB_SW_PROG_SRC)
#define SCRUB_FLAG_SW_TUN	BIT(SCRUB_SW_TUNABLE)
#define SCRUB_FLAG_HW_PROG	BIT(SCRUB_HW_PROG)
#define SCRUB_FLAG_HW_SRC	BIT(SCRUB_HW_SRC)
#define SCRUB_FLAG_HW_PROG_SRC	BIT(SCRUB_HW_PROG_SRC)
#define SCRUB_FLAG_HW_TUN	BIT(SCRUB_HW_TUNABLE)
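
/*
 * Example (illustrative sketch): scrub_cap advertises the scrubbing modes
 * the chipset supports, while scrub_mode reflects the mode currently in use:
 *
 *	mci->scrub_cap = SCRUB_FLAG_HW_SRC | SCRUB_FLAG_HW_TUN;
 *	mci->scrub_mode = SCRUB_HW_SRC;
 */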

/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */

/* EDAC internal operation states */
#define	OP_ALLOC		0x100
#define OP_RUNNING_POLL		0x201
#define OP_RUNNING_INTERRUPT	0x202
#define OP_RUNNING_POLL_INTR	0x203
#define OP_OFFLINE		0x300

/*
 * Concepts used in the EDAC subsystem
 *
 * There are several things to be aware of that aren't at all obvious:
 *
 * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc..
 *
 * These are some of the many terms that are thrown about that don't always
 * mean what people think they mean (Inconceivable!).  In the interest of
 * creating a common ground for discussion, terms and their definitions
 * will be established.
 *
 * Memory devices:	The individual DRAM chips on a memory stick.  These
 *			devices commonly output 4 and 8 bits each (x4, x8).
 *			Grouping several of these in parallel provides the
 *			number of bits that the memory controller expects:
 *			typically 72 bits, in order to provide 64 bits +
 *			8 bits of ECC data.
 *
 * Memory Stick:	A printed circuit board that aggregates multiple
 *			memory devices in parallel.  In general, this is the
 *			Field Replaceable Unit (FRU) which gets replaced in
 *			the case of excessive errors. Most often it is also
 *			called a DIMM (Dual Inline Memory Module).
 *
 * Memory Socket:	A physical connector on the motherboard that accepts
 *			a single memory stick. Also called a "slot" in several
 *			datasheets.
 *
 * Channel:		A memory controller channel, responsible for
 *			communicating with a group of DIMMs. Each channel has its own
 *			independent control (command) and data bus, and can
 *			be used independently or grouped with other channels.
 *
 * Branch:		Usually the highest level of the hierarchy on a
 *			Fully-Buffered DIMM memory controller.
 *			Typically, it contains two channels.
 *			Two channels at the same branch can be used in single
 *			mode or in lockstep mode.
 *			When lockstep is enabled, the cacheline is doubled,
 *			but it generally brings some performance penalty.
 *			Also, it is generally not possible to point to just one
 *			memory stick when an error occurs, as the error
 *			correction code is calculated using two DIMMs instead
 *			of one. Due to that, it is capable of correcting more
 *			errors than in single mode.
 *
 * Single-channel:	The data accessed by the memory controller is contained
 *			in one DIMM only. E.g., if the data is 64 bits wide,
 *			the data flows to the CPU using one 64-bit parallel
 *			access.
 *			Typically used with SDR, DDR, DDR2 and DDR3 memories.
 *			FB-DIMM and RAMBUS use a different concept for channel,
 *			so this concept doesn't apply there.
 *
 * Double-channel:	The data size accessed by the memory controller is
 *			interleaved across two DIMMs, accessed at the same time.
 *			E.g., if the DIMM is 64 bits wide (72 bits with ECC),
 *			the data flows to the CPU using a 128-bit parallel
 *			access.
 *
 * Chip-select row:	This is the name of the DRAM signal used to select the
 *			DRAM ranks to be accessed. Common chip-select rows for
 *			single channel are 64 bits, for dual channel 128 bits.
 *			It may not be visible to the memory controller, as some
 *			DIMM types have a memory buffer that can hide direct
 *			access to it from the Memory Controller.
 *
 * Single-Ranked stick:	A Single-ranked stick has 1 chip-select row of memory.
 *			Motherboards commonly drive two chip-select pins to
 *			a memory stick. A single-ranked stick will occupy
 *			only one of those rows. The other will be unused.
 *
 * Double-Ranked stick:	A double-ranked stick has two chip-select rows which
 *			access different sets of memory devices.  The two
 *			rows cannot be accessed concurrently.
 *
 * Double-sided stick:	DEPRECATED TERM, see Double-Ranked stick.
 *			A double-sided stick has two chip-select rows which
 *			access different sets of memory devices. The two
 *			rows cannot be accessed concurrently. "Double-sided"
 *			is irrespective of the memory devices being mounted
 *			on both sides of the memory stick.
 *
 * Socket set:		All of the memory sticks that are required for
 *			a single memory access or all of the memory sticks
 *			spanned by a chip-select row.  A single socket set
 *			has two chip-select rows and, if double-sided sticks
 *			are used, these will occupy those chip-select rows.
 *
 * Bank:		This term is avoided because it is unclear when
 *			needing to distinguish between chip-select rows and
 *			socket sets.
 *
 * Controller pages:
 *
 * Physical pages:
 *
 * Virtual pages:
 *
 *
 * STRUCTURE ORGANIZATION AND CHOICES
 *
 *
 *
 * PS - I enjoyed writing all that about as much as you enjoyed reading it.
 */

/**
 * enum edac_mc_layer_type - memory controller hierarchy layer
 *
 * @EDAC_MC_LAYER_BRANCH:	memory layer is named "branch"
 * @EDAC_MC_LAYER_CHANNEL:	memory layer is named "channel"
 * @EDAC_MC_LAYER_SLOT:		memory layer is named "slot"
 * @EDAC_MC_LAYER_CHIP_SELECT:	memory layer is named "chip select"
 * @EDAC_MC_LAYER_ALL_MEM:	memory layout is unknown. All memory is mapped
 *				as a single memory area. This is used when
 *				retrieving errors from a firmware driven driver.
 *
 * This enum is used by the drivers to tell edac_mc_sysfs what name should
 * be used when describing a memory stick location.
 */
enum edac_mc_layer_type {
	EDAC_MC_LAYER_BRANCH,
	EDAC_MC_LAYER_CHANNEL,
	EDAC_MC_LAYER_SLOT,
	EDAC_MC_LAYER_CHIP_SELECT,
	EDAC_MC_LAYER_ALL_MEM,
};

/**
 * struct edac_mc_layer - describes the memory controller hierarchy
 * @type:		layer type
 * @size:		number of components per layer. For example,
 *			if the channel layer has two channels, size = 2
 * @is_virt_csrow:	This layer is part of the "csrow" when old API
 *			compatibility mode is enabled. Otherwise, it is
 *			a channel
 */
struct edac_mc_layer {
	enum edac_mc_layer_type	type;
	unsigned		size;
	bool			is_virt_csrow;
};

/*
 * Maximum number of layers used by the memory controller to uniquely
 * identify a single memory stick.
 * NOTE: Changing this constant requires changing not only the constant
 * below, but also the existing code in the core, as some code there is
 * optimized for 3 layers.
 */
#define EDAC_MAX_LAYERS		3
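
/*
 * Example (illustrative sketch): a driver describes its hierarchy with an
 * edac_mc_layer array and hands it to the EDAC core allocator.  The
 * edac_mc_alloc() prototype is assumed here to be the one declared in the
 * EDAC core headers, and struct my_pvt is a hypothetical driver-private
 * structure:
 *
 *	struct edac_mc_layer layers[2];
 *	struct mem_ctl_info *mci;
 *
 *	layers[0].type = EDAC_MC_LAYER_CHANNEL;
 *	layers[0].size = 2;
 *	layers[0].is_virt_csrow = false;
 *	layers[1].type = EDAC_MC_LAYER_SLOT;
 *	layers[1].size = 4;
 *	layers[1].is_virt_csrow = true;
 *
 *	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
 *			    sizeof(struct my_pvt));
 */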

/**
 * EDAC_DIMM_OFF - Macro responsible for getting the offset inside a pointer
 *		   array for the element given by the [layer0,layer1,layer2]
 *		   position
 *
 * @layers:	a struct edac_mc_layer array, describing how many elements
 *		were allocated for each layer
 * @nlayers:	Number of layers in the @layers array
 * @layer0:	layer0 position
 * @layer1:	layer1 position. Unused if nlayers < 2
 * @layer2:	layer2 position. Unused if nlayers < 3
 *
 * For 1 layer, this macro returns "&var[layer0] - &var"
 * For 2 layers, this macro is similar to allocating a bi-dimensional array
 *		and returning "&var[layer0][layer1] - &var"
 * For 3 layers, this macro is similar to allocating a tri-dimensional array
 *		and returning "&var[layer0][layer1][layer2] - &var"
 *
 * A loop could be used here to make it more generic, but, as we only have
 * 3 layers, this is a little faster.
 * By design, nlayers can never be 0 or more than 3. If that ever happens,
 * an error value is returned, causing an oops during the memory allocation
 * routine, which points the developer at the mistake.
 */
#define EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2) ({		\
	int __i;							\
	if ((nlayers) == 1)						\
		__i = layer0;						\
	else if ((nlayers) == 2)					\
		__i = (layer1) + ((layers[1]).size * (layer0));		\
	else if ((nlayers) == 3)					\
		__i = (layer2) + ((layers[2]).size * ((layer1) +	\
			    ((layers[1]).size * (layer0))));		\
	else								\
		__i = -EINVAL;						\
	__i;								\
})

/**
 * EDAC_DIMM_PTR - Macro responsible for getting a pointer inside a pointer
 *		   array for the element given by the [layer0,layer1,layer2]
 *		   position
 *
 * @layers:	a struct edac_mc_layer array, describing how many elements
 *		were allocated for each layer
 * @var:	name of the variable where we want to get the pointer
 *		(like mci->dimms)
 * @nlayers:	Number of layers in the @layers array
 * @layer0:	layer0 position
 * @layer1:	layer1 position. Unused if nlayers < 2
 * @layer2:	layer2 position. Unused if nlayers < 3
 *
 * For 1 layer, this macro returns "&var[layer0]"
 * For 2 layers, this macro is similar to allocating a bi-dimensional array
 *		and returning "&var[layer0][layer1]"
 * For 3 layers, this macro is similar to allocating a tri-dimensional array
 *		and returning "&var[layer0][layer1][layer2]"
 */
#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({	\
	typeof(*var) __p;						\
	int ___i = EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2);	\
	if (___i < 0)							\
		__p = NULL;						\
	else								\
		__p = (var)[___i];					\
	__p;								\
})
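
/*
 * Example (illustrative sketch): with the two-layer (channel, slot) setup
 * sketched after EDAC_MAX_LAYERS above, a driver can fetch the dimm_info
 * for channel 1, slot 0 from the mci->dimms array; nr_pages is a
 * hypothetical value computed by the driver:
 *
 *	struct dimm_info *dimm;
 *
 *	dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, 1, 0, 0);
 *	if (dimm)
 *		dimm->nr_pages = nr_pages;
 */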

struct dimm_info {
	struct device dev;

	char label[EDAC_MC_LABEL_LEN + 1];	/* DIMM label on motherboard */

	/* Memory location data */
	unsigned location[EDAC_MAX_LAYERS];

	struct mem_ctl_info *mci;	/* the parent */

	u32 grain;		/* granularity of reported error in bytes */
	enum dev_type dtype;	/* memory device type */
	enum mem_type mtype;	/* memory dimm type */
	enum edac_type edac_mode;	/* EDAC mode for this dimm */

	u32 nr_pages;			/* number of pages on this dimm */

	unsigned csrow, cschannel;	/* Points to the old API data */
};
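
/*
 * Example (illustrative sketch): during probe, a driver typically fills one
 * dimm_info per populated slot.  The slot and size_mb values are
 * hypothetical; a grain of 8 means errors are reported with an 8-byte
 * granularity:
 *
 *	snprintf(dimm->label, sizeof(dimm->label), "CPU#0_DIMM#%u", slot);
 *	dimm->nr_pages = size_mb << (20 - PAGE_SHIFT);
 *	dimm->grain = 8;
 *	dimm->dtype = DEV_X8;
 *	dimm->mtype = MEM_DDR3;
 *	dimm->edac_mode = EDAC_SECDED;
 */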

/**
 * struct rank_info - contains the information for one DIMM rank
 *
 * @chan_idx:	channel number where the rank is (typically, 0 or 1)
 * @ce_count:	number of correctable errors for this rank
 * @csrow:	A pointer to the chip select row structure (the parent
 *		structure). The location of the rank is given by
 *		the (csrow->csrow_idx, chan_idx) vector.
 * @dimm:	A pointer to the DIMM structure, where the DIMM label
 *		information is stored.
 *
 * FIXME: Currently, the EDAC core model will assume one DIMM per rank.
 *	  This is a bad assumption, but it makes this patch easier. Later
 *	  patches in this series will fix this issue.
 */
struct rank_info {
	int chan_idx;
	struct csrow_info *csrow;
	struct dimm_info *dimm;

	u32 ce_count;		/* Correctable Errors for this csrow */
};

struct csrow_info {
	struct device dev;

	/* Used only by edac_mc_find_csrow_by_page() */
	unsigned long first_page;	/* first page number in csrow */
	unsigned long last_page;	/* last page number in csrow */
	unsigned long page_mask;	/* used for interleaving -
					 * 0UL for non intlv */

	int csrow_idx;			/* the chip-select row */

	u32 ue_count;		/* Uncorrectable Errors for this csrow */
	u32 ce_count;		/* Correctable Errors for this csrow */

	struct mem_ctl_info *mci;	/* the parent */

	/* channel information for this csrow */
	u32 nr_channels;
	struct rank_info **channels;
};

/*
 * struct errcount_attribute_data - used to store the various error counts
 */
struct errcount_attribute_data {
	int n_layers;
	int pos[EDAC_MAX_LAYERS];
	int layer0, layer1, layer2;
};

/**
 * struct edac_raw_error_desc - Raw error report structure
 * @grain:			minimum granularity for an error report, in bytes
 * @error_count:		number of errors of the same type
 * @top_layer:			top layer of the error (layer[0])
 * @mid_layer:			middle layer of the error (layer[1])
 * @low_layer:			low layer of the error (layer[2])
 * @page_frame_number:		page where the error happened
 * @offset_in_page:		page offset
 * @syndrome:			syndrome of the error (or 0 if unknown or if
 * 				the syndrome is not applicable)
 * @msg:			error message
 * @location:			location of the error
 * @label:			label of the affected DIMM(s)
 * @other_detail:		other driver-specific detail about the error
 * @enable_per_layer_report:	if false, the error affects all layers
 *				(typically, a memory controller error)
 */
struct edac_raw_error_desc {
	/*
	 * NOTE: everything before grain won't be cleaned by
	 * edac_raw_error_desc_clean()
	 */
	char location[LOCATION_SIZE];
	char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * EDAC_MAX_LABELS];
	long grain;

	/* the vars below and grain will be cleaned on every new error report */
	u16 error_count;
	int top_layer;
	int mid_layer;
	int low_layer;
	unsigned long page_frame_number;
	unsigned long offset_in_page;
	unsigned long syndrome;
	const char *msg;
	const char *other_detail;
	bool enable_per_layer_report;
};
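
/*
 * Example (illustrative sketch): drivers normally do not fill this structure
 * by hand; the EDAC core populates it when an error is reported through
 * edac_mc_handle_error(), whose parameters map onto the fields above.  The
 * prototype is assumed to be the one declared in the EDAC core headers, and
 * pfn/offset/syndrome/channel/slot are hypothetical values decoded by the
 * driver:
 *
 *	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
 *			     pfn, offset, syndrome,
 *			     channel, slot, -1,
 *			     "read error", "");
 */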

/* MEMORY controller information structure
 */
struct mem_ctl_info {
	struct device			dev;
	struct bus_type			bus;

	struct list_head link;	/* for global list of mem_ctl_info structs */

	struct module *owner;	/* Module owner of this control struct */

	unsigned long mtype_cap;	/* memory types supported by mc */
	unsigned long edac_ctl_cap;	/* Mem controller EDAC capabilities */
	unsigned long edac_cap;	/* configuration capabilities - this is
				 * closely related to edac_ctl_cap.  The
				 * difference is that the controller may be
				 * capable of s4ecd4ed which would be listed
				 * in edac_ctl_cap, but if channels aren't
				 * capable of s4ecd4ed then the edac_cap would
				 * not have that capability.
				 */
	unsigned long scrub_cap;	/* chipset scrub capabilities */
	enum scrub_type scrub_mode;	/* current scrub mode */

	/* Translates sdram memory scrub rate given in bytes/sec to the
	   internal representation and configures whatever else needs
	   to be configured.
	 */
	int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 bw);

	/* Get the current sdram memory scrub rate from the internal
	   representation and converts it to the closest matching
	   bandwidth in bytes/sec.
	 */
	int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci);


	/* pointer to edac checking routine */
	void (*edac_check) (struct mem_ctl_info * mci);

	/*
	 * Remaps memory pages: controller pages to physical pages.
	 * For most MC's, this will be NULL.
	 */
	/* FIXME - why not send the phys page to begin with? */
	unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
					   unsigned long page);
	int mc_idx;
	struct csrow_info **csrows;
	unsigned nr_csrows, num_cschannel;

	/*
	 * Memory Controller hierarchy
	 *
	 * There are basically two types of memory controller: the ones that
	 * see memory sticks ("DIMMs") and the ones that see memory ranks.
	 * All old memory controllers enumerate memories per rank, but most
	 * of the recent drivers enumerate memories per DIMM instead.
	 * When the memory controller is per rank, mem_is_per_rank is true.
	 */
	unsigned n_layers;
	struct edac_mc_layer *layers;
	bool mem_is_per_rank;

	/*
	 * DIMM info. Will eventually remove the entire csrows_info some day
	 */
	unsigned tot_dimms;
	struct dimm_info **dimms;

	/*
	 * FIXME - what about controllers on other busses? - IDs must be
	 * unique.  dev pointer should be sufficiently unique, but
	 * BUS:SLOT.FUNC numbers may not be unique.
	 */
	struct device *pdev;
	const char *mod_name;
	const char *mod_ver;
	const char *ctl_name;
	const char *dev_name;
	void *pvt_info;
	unsigned long start_time;	/* mci load start time (in jiffies) */

	/*
	 * drivers shouldn't access those fields directly, as the core
	 * already handles that.
	 */
	u32 ce_noinfo_count, ue_noinfo_count;
	u32 ue_mc, ce_mc;
	u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];

	struct completion complete;

	/* Additional top controller level attributes, but specified
	 * by the low level driver.
	 *
	 * Set by the low level driver to provide attributes at the
	 * controller level.
	 * An array of structures, NULL terminated.
	 *
	 * If attributes are desired, then set to an array of attributes.
	 * If no attributes are desired, leave NULL.
	 */
	const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes;

	/* work struct for this MC */
	struct delayed_work work;

	/*
	 * Used to report an error. Being part of this global struct
	 * means the memory is allocated by the EDAC core.
	 */
	struct edac_raw_error_desc error_desc;

	/* the internal state of this controller instance */
	int op_state;

#ifdef CONFIG_EDAC_DEBUG
	struct dentry *debugfs;
	u8 fake_inject_layer[EDAC_MAX_LAYERS];
	u32 fake_inject_ue;
	u16 fake_inject_count;
#endif
	__u8 csbased : 1,	/* csrow-based memory controller */
	     __resv  : 7;
};
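
/*
 * Example (illustrative sketch): after allocation, a driver fills in the
 * identification fields and its callbacks, then registers the controller
 * with the core.  edac_mc_add_mc() is assumed to be declared in the EDAC
 * core headers; the my_* names and the pdev pointer are hypothetical:
 *
 *	mci->pdev = &pdev->dev;
 *	mci->mod_name = "my_edac";
 *	mci->ctl_name = "my_memory_controller";
 *	mci->dev_name = dev_name(&pdev->dev);
 *	mci->edac_check = my_check_for_errors;
 *	mci->set_sdram_scrub_rate = my_set_scrub_rate;
 *	mci->get_sdram_scrub_rate = my_get_scrub_rate;
 *
 *	if (edac_mc_add_mc(mci))
 *		goto fail;
 */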

#endif