/*
 * edac_mc kernel module
 * (C) 2005-2007 Linux Networx (http://lnxi.com)
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
 *
 * (c) 2012-2013 - Mauro Carvalho Chehab
 *	The entire API were re-written, and ported to use struct device
 *
 */

#include <linux/ctype.h>
16
#include <linux/slab.h>
17
#include <linux/edac.h>
18
#include <linux/bug.h>
19
#include <linux/pm_runtime.h>
20
#include <linux/uaccess.h>
21

22
#include "edac_core.h"
23 24 25
#include "edac_module.h"

/* MC EDAC Controls, setable by module parameter, and sysfs */

/* Log uncorrectable errors to the console: 0=off, 1=on (default on) */
static int edac_mc_log_ue = 1;
/* Log correctable errors to the console: 0=off, 1=on (default on) */
static int edac_mc_log_ce = 1;
/* Panic the machine when an uncorrectable error is reported (default off) */
static int edac_mc_panic_on_ue;
/* Polling period, in milliseconds, for drivers without error interrupts */
static int edac_mc_poll_msec = 1000;
30 31

/* Getter functions for above */

/* Return whether uncorrectable errors should be logged to the console */
int edac_mc_get_log_ue(void)
{
	return edac_mc_log_ue;
}

/* Return whether correctable errors should be logged to the console */
int edac_mc_get_log_ce(void)
{
	return edac_mc_log_ce;
}

/* Return whether an uncorrectable error should panic the machine */
int edac_mc_get_panic_on_ue(void)
{
	return edac_mc_panic_on_ue;
}

/* Return the current polling period in msec. this is temporary */
int edac_mc_get_poll_msec(void)
{
	return edac_mc_poll_msec;
}

A
Arthur Jones 已提交
53 54
static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
{
55
	unsigned long l;
A
Arthur Jones 已提交
56 57 58 59 60
	int ret;

	if (!val)
		return -EINVAL;

61
	ret = kstrtoul(val, 0, &l);
62 63
	if (ret)
		return ret;
64 65

	if (l < 1000)
A
Arthur Jones 已提交
66
		return -EINVAL;
67 68

	*((unsigned long *)kp->arg) = l;
A
Arthur Jones 已提交
69 70 71 72 73 74 75

	/* notify edac_mc engine to reset the poll period */
	edac_mc_reset_delay_period(l);

	return 0;
}

76
/* Parameter declarations for above */
module_param(edac_mc_panic_on_ue, int, 0644);
MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
module_param(edac_mc_log_ue, int, 0644);
MODULE_PARM_DESC(edac_mc_log_ue,
		 "Log uncorrectable error to console: 0=off 1=on");
module_param(edac_mc_log_ce, int, 0644);
MODULE_PARM_DESC(edac_mc_log_ce,
		 "Log correctable error to console: 0=off 1=on");
/* custom setter so the polling work is re-armed when the period changes */
module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
		  &edac_mc_poll_msec, 0644);
MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");

/* parent device of every mc<N> device: the /sys .../edac/mc node */
static struct device *mci_pdev;

/*
 * various constants for Memory Controllers
 */
94
/* human-readable names for enum mem_type (memory technology) values */
static const char * const mem_types[] = {
	[MEM_EMPTY] = "Empty",
	[MEM_RESERVED] = "Reserved",
	[MEM_UNKNOWN] = "Unknown",
	[MEM_FPM] = "FPM",
	[MEM_EDO] = "EDO",
	[MEM_BEDO] = "BEDO",
	[MEM_SDR] = "Unbuffered-SDR",
	[MEM_RDR] = "Registered-SDR",
	[MEM_DDR] = "Unbuffered-DDR",
	[MEM_RDDR] = "Registered-DDR",
	[MEM_RMBS] = "RMBS",
	[MEM_DDR2] = "Unbuffered-DDR2",
	[MEM_FB_DDR2] = "FullyBuffered-DDR2",
	[MEM_RDDR2] = "Registered-DDR2",
	[MEM_XDR] = "XDR",
	[MEM_DDR3] = "Unbuffered-DDR3",
	[MEM_RDDR3] = "Registered-DDR3",
	[MEM_DDR4] = "Unbuffered-DDR4",
	[MEM_RDDR4] = "Registered-DDR4"
};

/* human-readable names for enum dev_type (chip data width) values */
static const char * const dev_types[] = {
	[DEV_UNKNOWN] = "Unknown",
	[DEV_X1] = "x1",
	[DEV_X2] = "x2",
	[DEV_X4] = "x4",
	[DEV_X8] = "x8",
	[DEV_X16] = "x16",
	[DEV_X32] = "x32",
	[DEV_X64] = "x64"
};

/* human-readable names for enum edac_type (ECC capability) values */
static const char * const edac_caps[] = {
	[EDAC_UNKNOWN] = "Unknown",
	[EDAC_NONE] = "None",
	[EDAC_RESERVED] = "Reserved",
	[EDAC_PARITY] = "PARITY",
	[EDAC_EC] = "EC",
	[EDAC_SECDED] = "SECDED",
	[EDAC_S2ECD2ED] = "S2ECD2ED",
	[EDAC_S4ECD4ED] = "S4ECD4ED",
	[EDAC_S8ECD8ED] = "S8ECD8ED",
	[EDAC_S16ECD16ED] = "S16ECD16ED"
};

140
#ifdef CONFIG_EDAC_LEGACY_SYSFS
/*
 * EDAC sysfs CSROW data structures and methods
 */

/* map a struct device embedded in a csrow_info back to its container */
#define to_csrow(k) container_of(k, struct csrow_info, dev)

/*
 * We need it to avoid namespace conflicts between the legacy API
 * and the per-dimm/per-rank one
 */
#define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
	static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)

/* a device attribute bound to one specific csrow channel number */
struct dev_ch_attribute {
	struct device_attribute attr;
	int channel;
};

#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
	struct dev_ch_attribute dev_attr_legacy_##_name = \
		{ __ATTR(_name, _mode, _show, _store), (_var) }

/* recover the channel number bound to a device_attribute */
#define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel)
164 165

/* Set of more default csrow<id> attribute show/store functions */
166 167
static ssize_t csrow_ue_count_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
168
{
169 170
	struct csrow_info *csrow = to_csrow(dev);

171
	return sprintf(data, "%u\n", csrow->ue_count);
172 173
}

174 175
static ssize_t csrow_ce_count_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
176
{
177 178
	struct csrow_info *csrow = to_csrow(dev);

179
	return sprintf(data, "%u\n", csrow->ce_count);
180 181
}

182 183
static ssize_t csrow_size_show(struct device *dev,
			       struct device_attribute *mattr, char *data)
184
{
185
	struct csrow_info *csrow = to_csrow(dev);
186 187 188 189
	int i;
	u32 nr_pages = 0;

	for (i = 0; i < csrow->nr_channels; i++)
190
		nr_pages += csrow->channels[i]->dimm->nr_pages;
191
	return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
192 193
}

194 195
static ssize_t csrow_mem_type_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
196
{
197 198
	struct csrow_info *csrow = to_csrow(dev);

199
	return sprintf(data, "%s\n", mem_types[csrow->channels[0]->dimm->mtype]);
200 201
}

202 203
static ssize_t csrow_dev_type_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
204
{
205 206
	struct csrow_info *csrow = to_csrow(dev);

207
	return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
208 209
}

210 211 212
static ssize_t csrow_edac_mode_show(struct device *dev,
				    struct device_attribute *mattr,
				    char *data)
213
{
214 215
	struct csrow_info *csrow = to_csrow(dev);

216
	return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
217 218 219
}

/* show/store functions for DIMM Label attributes */
220 221 222
static ssize_t channel_dimm_label_show(struct device *dev,
				       struct device_attribute *mattr,
				       char *data)
223
{
224 225
	struct csrow_info *csrow = to_csrow(dev);
	unsigned chan = to_channel(mattr);
226
	struct rank_info *rank = csrow->channels[chan];
227

228
	/* if field has not been initialized, there is nothing to send */
229
	if (!rank->dimm->label[0])
230 231 232
		return 0;

	return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
233
			rank->dimm->label);
234 235
}

236 237 238
/*
 * Store a user-assigned label for the DIMM behind one csrow channel.
 *
 * Rejects labels that do not fit instead of silently truncating them,
 * and strips the trailing newline that "echo foo > ..." appends so it
 * does not become part of the stored label (the old code kept the '\n'
 * and truncated over-long labels without telling userspace).
 *
 * Returns @count on success or -EINVAL on empty/over-long input.
 */
static ssize_t channel_dimm_label_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct csrow_info *csrow = to_csrow(dev);
	unsigned chan = to_channel(mattr);
	struct rank_info *rank = csrow->channels[chan];
	size_t copy_count = count;

	if (count == 0)
		return -EINVAL;

	/* a terminating NUL or newline is not part of the label */
	if (data[count - 1] == '\0' || data[count - 1] == '\n')
		copy_count -= 1;

	if (copy_count == 0 || copy_count >= sizeof(rank->dimm->label))
		return -EINVAL;

	strncpy(rank->dimm->label, data, copy_count);
	rank->dimm->label[copy_count] = '\0';

	return count;
}

/* show function for dynamic chX_ce_count attribute */
254 255
static ssize_t channel_ce_count_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
256
{
257 258
	struct csrow_info *csrow = to_csrow(dev);
	unsigned chan = to_channel(mattr);
259
	struct rank_info *rank = csrow->channels[chan];
260 261

	return sprintf(data, "%u\n", rank->ce_count);
262 263
}

264 265 266 267 268 269 270
/* cwrow<id>/attribute files */
DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL);
DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL);
DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL);
DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL);
DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL);
DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL);

/* default attributes of the CSROW<id> object */
static struct attribute *csrow_attrs[] = {
	&dev_attr_legacy_dev_type.attr,
	&dev_attr_legacy_mem_type.attr,
	&dev_attr_legacy_edac_mode.attr,
	&dev_attr_legacy_size_mb.attr,
	&dev_attr_legacy_ue_count.attr,
	&dev_attr_legacy_ce_count.attr,
	NULL,
};

/* group/driver-core glue: attrs are created atomically with the device */
static struct attribute_group csrow_attr_grp = {
	.attrs	= csrow_attrs,
};

static const struct attribute_group *csrow_attr_groups[] = {
	&csrow_attr_grp,
	NULL
};
291

292
/* Free the csrow_info once the last reference to its device is dropped */
static void csrow_attr_release(struct device *dev)
{
	struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);

	edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
	kfree(csrow);
}

static struct device_type csrow_attr_type = {
	.groups		= csrow_attr_groups,
	.release	= csrow_attr_release,
};

305 306 307 308
/*
 * possible dynamic channel DIMM Label attribute files
 *
 */

/* maximum number of channels the legacy API can expose per csrow */
#define EDAC_NR_CHANNELS	6

DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 0);
DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 1);
DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 2);
DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 3);
DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 4);
DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 5);

/* Total possible dynamic DIMM Label attribute file table */
static struct device_attribute *dynamic_csrow_dimm_attr[] = {
	&dev_attr_legacy_ch0_dimm_label.attr,
	&dev_attr_legacy_ch1_dimm_label.attr,
	&dev_attr_legacy_ch2_dimm_label.attr,
	&dev_attr_legacy_ch3_dimm_label.attr,
	&dev_attr_legacy_ch4_dimm_label.attr,
	&dev_attr_legacy_ch5_dimm_label.attr
};

/* possible dynamic channel ce_count attribute files */
DEVICE_CHANNEL(ch0_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 0);
DEVICE_CHANNEL(ch1_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 1);
DEVICE_CHANNEL(ch2_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 2);
DEVICE_CHANNEL(ch3_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 3);
DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 4);
DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 5);

/* Total possible dynamic ce_count attribute file table */
static struct device_attribute *dynamic_csrow_ce_count_attr[] = {
	&dev_attr_legacy_ch0_ce_count.attr,
	&dev_attr_legacy_ch1_ce_count.attr,
	&dev_attr_legacy_ch2_ce_count.attr,
	&dev_attr_legacy_ch3_ce_count.attr,
	&dev_attr_legacy_ch4_ce_count.attr,
	&dev_attr_legacy_ch5_ce_count.attr
};

359 360 361 362 363
/* Sum of nr_pages over every channel's DIMM in this csrow */
static inline int nr_pages_per_csrow(struct csrow_info *csrow)
{
	int ch, total = 0;

	for (ch = 0; ch < csrow->nr_channels; ch++)
		total += csrow->channels[ch]->dimm->nr_pages;

	return total;
}

369 370 371
/* Create a CSROW object under specifed edac_mc_device */
static int edac_create_csrow_object(struct mem_ctl_info *mci,
				    struct csrow_info *csrow, int index)
{
	int err, chan;

	/* the static per-channel attribute tables only cover 6 channels */
	if (csrow->nr_channels > EDAC_NR_CHANNELS)
		return -ENODEV;

	csrow->dev.type = &csrow_attr_type;
	csrow->dev.bus = mci->bus;
	device_initialize(&csrow->dev);
	csrow->dev.parent = &mci->dev;
	csrow->mci = mci;
	dev_set_name(&csrow->dev, "csrow%d", index);
	dev_set_drvdata(&csrow->dev, csrow);

	edac_dbg(0, "creating (virtual) csrow node %s\n",
		 dev_name(&csrow->dev));

	err = device_add(&csrow->dev);
	if (err < 0)
		return err;

	/* add the per-channel label and ce_count files */
	for (chan = 0; chan < csrow->nr_channels; chan++) {
		/* Only expose populated DIMMs */
		if (!csrow->channels[chan]->dimm->nr_pages)
			continue;
		err = device_create_file(&csrow->dev,
					 dynamic_csrow_dimm_attr[chan]);
		if (err < 0)
			goto error;
		err = device_create_file(&csrow->dev,
					 dynamic_csrow_ce_count_attr[chan]);
		if (err < 0) {
			device_remove_file(&csrow->dev,
					   dynamic_csrow_dimm_attr[chan]);
			goto error;
		}
	}

	return 0;

error:
	/* unwind the channel files created so far, then drop the device */
	for (--chan; chan >= 0; chan--) {
		device_remove_file(&csrow->dev,
					dynamic_csrow_dimm_attr[chan]);
		device_remove_file(&csrow->dev,
					   dynamic_csrow_ce_count_attr[chan]);
	}
	put_device(&csrow->dev);

	return err;
}
423 424

/* Create a CSROW object under specifed edac_mc_device */
static int edac_create_csrow_objects(struct mem_ctl_info *mci)
{
	int err, i, chan;
	struct csrow_info *csrow;

	for (i = 0; i < mci->nr_csrows; i++) {
		csrow = mci->csrows[i];
		/* csrows with no populated DIMM get no sysfs node */
		if (!nr_pages_per_csrow(csrow))
			continue;
		err = edac_create_csrow_object(mci, mci->csrows[i], i);
		if (err < 0) {
			edac_dbg(1,
				 "failure: create csrow objects for csrow %d\n",
				 i);
			goto error;
		}
	}
	return 0;

error:
	/* tear down every csrow object created before the failure */
	for (--i; i >= 0; i--) {
		csrow = mci->csrows[i];
		if (!nr_pages_per_csrow(csrow))
			continue;
		for (chan = csrow->nr_channels - 1; chan >= 0; chan--) {
			if (!csrow->channels[chan]->dimm->nr_pages)
				continue;
			device_remove_file(&csrow->dev,
						dynamic_csrow_dimm_attr[chan]);
			device_remove_file(&csrow->dev,
						dynamic_csrow_ce_count_attr[chan]);
		}
		put_device(&mci->csrows[i]->dev);
	}

	return err;
}
462

463 464 465 466
/* Remove every csrow device and its dynamic per-channel files */
static void edac_delete_csrow_objects(struct mem_ctl_info *mci)
{
	int i, chan;
	struct csrow_info *csrow;

	/* walk in reverse order of creation */
	for (i = mci->nr_csrows - 1; i >= 0; i--) {
		csrow = mci->csrows[i];
		if (!nr_pages_per_csrow(csrow))
			continue;
		for (chan = csrow->nr_channels - 1; chan >= 0; chan--) {
			if (!csrow->channels[chan]->dimm->nr_pages)
				continue;
			edac_dbg(1, "Removing csrow %d channel %d sysfs nodes\n",
				 i, chan);
			device_remove_file(&csrow->dev,
						dynamic_csrow_dimm_attr[chan]);
			device_remove_file(&csrow->dev,
						dynamic_csrow_ce_count_attr[chan]);
		}
		device_unregister(&mci->csrows[i]->dev);
	}
}
485 486 487 488 489 490 491 492 493 494 495 496 497 498
#endif

/*
 * Per-dimm (or per-rank) devices
 */

/* map a struct device embedded in a dimm_info back to its container */
#define to_dimm(k) container_of(k, struct dimm_info, dev)

/* show/store functions for DIMM Label attributes */
static ssize_t dimmdev_location_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	/* format the "<layer-name> <index> ..." location string into data */
	return edac_dimm_info_location(dimm, data, PAGE_SIZE);
}

static ssize_t dimmdev_label_show(struct device *dev,
				  struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	/* if field has not been initialized, there is nothing to send */
	if (!dimm->label[0])
		return 0;

	return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", dimm->label);
}

/*
 * Store a user-assigned label for this dimm/rank.
 *
 * Rejects labels that do not fit instead of silently truncating them,
 * and strips the trailing newline that "echo foo > ..." appends so it
 * does not end up as part of the stored label (the old code kept the
 * '\n' and truncated over-long labels without telling userspace).
 *
 * Returns @count on success or -EINVAL on empty/over-long input.
 */
static ssize_t dimmdev_label_store(struct device *dev,
				   struct device_attribute *mattr,
				   const char *data,
				   size_t count)
{
	struct dimm_info *dimm = to_dimm(dev);
	size_t copy_count = count;

	if (count == 0)
		return -EINVAL;

	/* a terminating NUL or newline is not part of the label */
	if (data[count - 1] == '\0' || data[count - 1] == '\n')
		copy_count -= 1;

	if (copy_count == 0 || copy_count >= sizeof(dimm->label))
		return -EINVAL;

	strncpy(dimm->label, data, copy_count);
	dimm->label[copy_count] = '\0';

	return count;
}

static ssize_t dimmdev_size_show(struct device *dev,
				 struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages));
}

static ssize_t dimmdev_mem_type_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%s\n", mem_types[dimm->mtype]);
}

static ssize_t dimmdev_dev_type_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%s\n", dev_types[dimm->dtype]);
}

static ssize_t dimmdev_edac_mode_show(struct device *dev,
				      struct device_attribute *mattr,
				      char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]);
}

/* dimm/rank attribute files */
static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR,
		   dimmdev_label_show, dimmdev_label_store);
static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL);
static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL);
static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL);
static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL);
static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL);

/* attributes of the dimm<id>/rank<id> object */
static struct attribute *dimm_attrs[] = {
	&dev_attr_dimm_label.attr,
	&dev_attr_dimm_location.attr,
	&dev_attr_size.attr,
	&dev_attr_dimm_mem_type.attr,
	&dev_attr_dimm_dev_type.attr,
	&dev_attr_dimm_edac_mode.attr,
	NULL,
};

/* group/driver-core glue: attrs are created atomically with the device */
static struct attribute_group dimm_attr_grp = {
	.attrs	= dimm_attrs,
};

static const struct attribute_group *dimm_attr_groups[] = {
	&dimm_attr_grp,
	NULL
};

592
/* Free the dimm_info once the last reference to its device is dropped */
static void dimm_attr_release(struct device *dev)
{
	struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);

	edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev));
	kfree(dimm);
}

static struct device_type dimm_attr_type = {
	.groups		= dimm_attr_groups,
	.release	= dimm_attr_release,
};

/* Create a DIMM object under specifed memory controller device */
static int edac_create_dimm_object(struct mem_ctl_info *mci,
				   struct dimm_info *dimm,
				   int index)
{
	int err;
	dimm->mci = mci;

	dimm->dev.type = &dimm_attr_type;
	dimm->dev.bus = mci->bus;
	device_initialize(&dimm->dev);

	dimm->dev.parent = &mci->dev;
	/* csrow-based controllers expose ranks; others expose whole DIMMs */
	if (mci->csbased)
		dev_set_name(&dimm->dev, "rank%d", index);
	else
		dev_set_name(&dimm->dev, "dimm%d", index);
	dev_set_drvdata(&dimm->dev, dimm);
	pm_runtime_forbid(&mci->dev);

	err =  device_add(&dimm->dev);

	edac_dbg(0, "creating rank/dimm device %s\n", dev_name(&dimm->dev));

	return err;
}
631

632 633 634 635 636
/*
 * Memory controller device
 */

/* map a struct device embedded in a mem_ctl_info back to its container */
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

/* Zero every error counter of the MC: global, per-csrow and per-layer */
static ssize_t mci_reset_counters_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int cnt, row, chan, i;
	mci->ue_mc = 0;
	mci->ce_mc = 0;
	mci->ue_noinfo_count = 0;
	mci->ce_noinfo_count = 0;

	for (row = 0; row < mci->nr_csrows; row++) {
		struct csrow_info *ri = mci->csrows[row];

		ri->ue_count = 0;
		ri->ce_count = 0;

		for (chan = 0; chan < ri->nr_channels; chan++)
			ri->channels[chan]->ce_count = 0;
	}

	/* layer i holds layers[0].size * ... * layers[i].size counters */
	cnt = 1;
	for (i = 0; i < mci->n_layers; i++) {
		cnt *= mci->layers[i].size;
		memset(mci->ce_per_layer[i], 0, cnt * sizeof(u32));
		memset(mci->ue_per_layer[i], 0, cnt * sizeof(u32));
	}

	/* restart the seconds_since_reset clock */
	mci->start_time = jiffies;
	return count;
}

670 671 672 673 674 675 676 677 678
/* Memory scrubbing interface:
 *
 * A MC driver can limit the scrubbing bandwidth based on the CPU type.
 * Therefore, ->set_sdram_scrub_rate should be made to return the actual
 * bandwidth that is accepted or 0 when scrubbing is to be disabled.
 *
 * Negative value still means that an error has occurred while setting
 * the scrub rate.
 */
679 680
static ssize_t mci_sdram_scrub_rate_store(struct device *dev,
					  struct device_attribute *mattr,
681
					  const char *data, size_t count)
682
{
683
	struct mem_ctl_info *mci = to_mci(dev);
684
	unsigned long bandwidth = 0;
685
	int new_bw = 0;
686

687
	if (kstrtoul(data, 10, &bandwidth) < 0)
688
		return -EINVAL;
689

690
	new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
691 692 693 694
	if (new_bw < 0) {
		edac_printk(KERN_WARNING, EDAC_MC,
			    "Error setting scrub rate to: %lu\n", bandwidth);
		return -EINVAL;
695
	}
696

697
	return count;
698 699
}

700 701 702
/*
 * ->get_sdram_scrub_rate() return value semantics same as above.
 */
703 704 705
static ssize_t mci_sdram_scrub_rate_show(struct device *dev,
					 struct device_attribute *mattr,
					 char *data)
706
{
707
	struct mem_ctl_info *mci = to_mci(dev);
708
	int bandwidth = 0;
709

710 711
	bandwidth = mci->get_sdram_scrub_rate(mci);
	if (bandwidth < 0) {
712
		edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
713
		return bandwidth;
714
	}
715 716

	return sprintf(data, "%d\n", bandwidth);
717 718 719
}

/* default attribute files for the MCI object */
720 721 722
static ssize_t mci_ue_count_show(struct device *dev,
				 struct device_attribute *mattr,
				 char *data)
723
{
724 725
	struct mem_ctl_info *mci = to_mci(dev);

726
	return sprintf(data, "%d\n", mci->ue_mc);
727 728
}

729 730 731
static ssize_t mci_ce_count_show(struct device *dev,
				 struct device_attribute *mattr,
				 char *data)
732
{
733 734
	struct mem_ctl_info *mci = to_mci(dev);

735
	return sprintf(data, "%d\n", mci->ce_mc);
736 737
}

738 739 740
static ssize_t mci_ce_noinfo_show(struct device *dev,
				  struct device_attribute *mattr,
				  char *data)
741
{
742 743
	struct mem_ctl_info *mci = to_mci(dev);

744
	return sprintf(data, "%d\n", mci->ce_noinfo_count);
745 746
}

747 748 749
static ssize_t mci_ue_noinfo_show(struct device *dev,
				  struct device_attribute *mattr,
				  char *data)
750
{
751 752
	struct mem_ctl_info *mci = to_mci(dev);

753
	return sprintf(data, "%d\n", mci->ue_noinfo_count);
754 755
}

756 757 758
static ssize_t mci_seconds_show(struct device *dev,
				struct device_attribute *mattr,
				char *data)
759
{
760 761
	struct mem_ctl_info *mci = to_mci(dev);

762
	return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
763 764
}

765 766 767
static ssize_t mci_ctl_name_show(struct device *dev,
				 struct device_attribute *mattr,
				 char *data)
768
{
769 770
	struct mem_ctl_info *mci = to_mci(dev);

771
	return sprintf(data, "%s\n", mci->ctl_name);
772 773
}

774 775 776
static ssize_t mci_size_mb_show(struct device *dev,
				struct device_attribute *mattr,
				char *data)
777
{
778
	struct mem_ctl_info *mci = to_mci(dev);
779
	int total_pages = 0, csrow_idx, j;
780

781
	for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
782
		struct csrow_info *csrow = mci->csrows[csrow_idx];
783

784 785
		for (j = 0; j < csrow->nr_channels; j++) {
			struct dimm_info *dimm = csrow->channels[j]->dimm;
J
Josh Hunt 已提交
786

787
			total_pages += dimm->nr_pages;
788
		}
789 790
	}

791
	return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
792 793
}

794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810
/* Show one "<layer-name> <max-index>" pair per memory layer */
static ssize_t mci_max_location_show(struct device *dev,
				     struct device_attribute *mattr,
				     char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	char *p = data;
	int lay;

	for (lay = 0; lay < mci->n_layers; lay++)
		p += sprintf(p, "%s %d ",
			     edac_layer_name[mci->layers[lay].type],
			     mci->layers[lay].size - 1);

	return p - data;
}

811 812 813 814 815 816 817 818
#ifdef CONFIG_EDAC_DEBUG
static ssize_t edac_fake_inject_write(struct file *file,
				      const char __user *data,
				      size_t count, loff_t *ppos)
{
	struct device *dev = file->private_data;
	struct mem_ctl_info *mci = to_mci(dev);
	static enum hw_event_mc_err_type type;
819 820 821 822
	u16 errcount = mci->fake_inject_count;

	if (!errcount)
		errcount = 1;
823 824 825 826 827

	type = mci->fake_inject_ue ? HW_EVENT_ERR_UNCORRECTED
				   : HW_EVENT_ERR_CORRECTED;

	printk(KERN_DEBUG
828 829
	       "Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n",
		errcount,
830
		(type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE",
831
		errcount > 1 ? "s" : "",
832 833 834 835
		mci->fake_inject_layer[0],
		mci->fake_inject_layer[1],
		mci->fake_inject_layer[2]
	       );
836
	edac_mc_handle_error(type, mci, errcount, 0, 0, 0,
837 838 839
			     mci->fake_inject_layer[0],
			     mci->fake_inject_layer[1],
			     mci->fake_inject_layer[2],
840
			     "FAKE ERROR", "for EDAC testing only");
841 842 843 844 845

	return count;
}

static const struct file_operations debug_fake_inject_fops = {
W
Wei Yongjun 已提交
846
	.open = simple_open,
847 848 849 850 851
	.write = edac_fake_inject_write,
	.llseek = generic_file_llseek,
};
#endif

852
/* default Control file */
DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);

/* default Attribute files */
DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);

/* memory scrubber attribute file */
/* mode/show/store are filled in at MC registration time, depending on
 * whether the driver implements get/set_sdram_scrub_rate */
DEVICE_ATTR(sdram_scrub_rate, 0, NULL, NULL);

static struct attribute *mci_attrs[] = {
	&dev_attr_reset_counters.attr,
	&dev_attr_mc_name.attr,
	&dev_attr_size_mb.attr,
	&dev_attr_seconds_since_reset.attr,
	&dev_attr_ue_noinfo_count.attr,
	&dev_attr_ce_noinfo_count.attr,
	&dev_attr_ue_count.attr,
	&dev_attr_ce_count.attr,
	&dev_attr_max_location.attr,
	NULL
};

static struct attribute_group mci_attr_grp = {
	.attrs	= mci_attrs,
};

static const struct attribute_group *mci_attr_groups[] = {
	&mci_attr_grp,
	NULL
};

890
static void mci_attr_release(struct device *dev)
891
{
892 893
	struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);

894
	edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
895
	kfree(mci);
896 897
}

898 899 900 901
static struct device_type mci_attr_type = {
	.groups		= mci_attr_groups,
	.release	= mci_attr_release,
};
902

903
#ifdef CONFIG_EDAC_DEBUG
/* top-level /sys/kernel/debug/edac directory, NULL if unavailable */
static struct dentry *edac_debugfs;

/* Create the top-level edac debugfs directory */
int __init edac_debugfs_init(void)
{
	edac_debugfs = debugfs_create_dir("edac", NULL);
	if (IS_ERR(edac_debugfs)) {
		edac_debugfs = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* Remove the top-level edac debugfs directory */
void __exit edac_debugfs_exit(void)
{
	debugfs_remove(edac_debugfs);
}

921
static int edac_create_debug_nodes(struct mem_ctl_info *mci)
922 923 924 925 926
{
	struct dentry *d, *parent;
	char name[80];
	int i;

927 928 929 930
	if (!edac_debugfs)
		return -ENODEV;

	d = debugfs_create_dir(mci->dev.kobj.name, edac_debugfs);
931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948
	if (!d)
		return -ENOMEM;
	parent = d;

	for (i = 0; i < mci->n_layers; i++) {
		sprintf(name, "fake_inject_%s",
			     edac_layer_name[mci->layers[i].type]);
		d = debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent,
				      &mci->fake_inject_layer[i]);
		if (!d)
			goto nomem;
	}

	d = debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent,
				&mci->fake_inject_ue);
	if (!d)
		goto nomem;

949 950 951 952 953
	d = debugfs_create_u16("fake_inject_count", S_IRUGO | S_IWUSR, parent,
				&mci->fake_inject_count);
	if (!d)
		goto nomem;

954 955 956 957 958 959
	d = debugfs_create_file("fake_inject", S_IWUSR, parent,
				&mci->dev,
				&debug_fake_inject_fops);
	if (!d)
		goto nomem;

960
	mci->debugfs = parent;
961 962 963 964 965 966 967
	return 0;
nomem:
	debugfs_remove(mci->debugfs);
	return -ENOMEM;
}
#endif

968 969 970 971 972 973 974 975 976 977
/*
 * Create a new Memory Controller kobject instance,
 *	mc<id> under the 'mc' directory
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
{
978
	int i, err;
979

980 981 982 983
	/*
	 * The memory controller needs its own bus, in order to avoid
	 * namespace conflicts at /sys/bus/edac.
	 */
B
Borislav Petkov 已提交
984 985
	mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
	if (!mci->bus->name)
986
		return -ENOMEM;
B
Borislav Petkov 已提交
987 988 989 990

	edac_dbg(0, "creating bus %s\n", mci->bus->name);

	err = bus_register(mci->bus);
991 992
	if (err < 0)
		return err;
993

994 995 996
	/* get the /sys/devices/system/edac subsys reference */
	mci->dev.type = &mci_attr_type;
	device_initialize(&mci->dev);
997

998
	mci->dev.parent = mci_pdev;
B
Borislav Petkov 已提交
999
	mci->dev.bus = mci->bus;
1000 1001 1002 1003
	dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
	dev_set_drvdata(&mci->dev, mci);
	pm_runtime_forbid(&mci->dev);

1004
	edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
1005 1006
	err = device_add(&mci->dev);
	if (err < 0) {
1007
		edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
B
Borislav Petkov 已提交
1008 1009
		bus_unregister(mci->bus);
		kfree(mci->bus->name);
1010
		return err;
1011 1012
	}

1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028
	if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
		if (mci->get_sdram_scrub_rate) {
			dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
			dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
		}
		if (mci->set_sdram_scrub_rate) {
			dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
			dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
		}
		err = device_create_file(&mci->dev,
					 &dev_attr_sdram_scrub_rate);
		if (err) {
			edac_dbg(1, "failure: create sdram_scrub_rate\n");
			goto fail2;
		}
	}
1029 1030
	/*
	 * Create the dimm/rank devices
1031
	 */
1032
	for (i = 0; i < mci->tot_dimms; i++) {
1033
		struct dimm_info *dimm = mci->dimms[i];
1034 1035 1036 1037
		/* Only expose populated DIMMs */
		if (dimm->nr_pages == 0)
			continue;
#ifdef CONFIG_EDAC_DEBUG
1038
		edac_dbg(1, "creating dimm%d, located at ", i);
1039 1040 1041 1042 1043 1044 1045
		if (edac_debug_level >= 1) {
			int lay;
			for (lay = 0; lay < mci->n_layers; lay++)
				printk(KERN_CONT "%s %d ",
					edac_layer_name[mci->layers[lay].type],
					dimm->location[lay]);
			printk(KERN_CONT "\n");
1046
		}
1047
#endif
1048 1049
		err = edac_create_dimm_object(mci, dimm, i);
		if (err) {
1050
			edac_dbg(1, "failure: create dimm %d obj\n", i);
1051 1052
			goto fail;
		}
1053 1054
	}

1055
#ifdef CONFIG_EDAC_LEGACY_SYSFS
1056 1057 1058
	err = edac_create_csrow_objects(mci);
	if (err < 0)
		goto fail;
1059
#endif
1060

1061 1062 1063
#ifdef CONFIG_EDAC_DEBUG
	edac_create_debug_nodes(mci);
#endif
1064 1065
	return 0;

1066
fail:
1067
	for (i--; i >= 0; i--) {
1068
		struct dimm_info *dimm = mci->dimms[i];
1069 1070
		if (dimm->nr_pages == 0)
			continue;
1071
		device_unregister(&dimm->dev);
1072
	}
1073
fail2:
1074
	device_unregister(&mci->dev);
B
Borislav Petkov 已提交
1075 1076
	bus_unregister(mci->bus);
	kfree(mci->bus->name);
1077 1078 1079 1080 1081 1082 1083 1084
	return err;
}

/*
 * remove a Memory Controller instance
 */
void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
{
	int i;

	edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
	debugfs_remove(mci->debugfs);
#endif
#ifdef CONFIG_EDAC_LEGACY_SYSFS
	edac_delete_csrow_objects(mci);
#endif

	/* unregister the dimm/rank devices; only populated ones exist */
	for (i = 0; i < mci->tot_dimms; i++) {
		struct dimm_info *dimm = mci->dimms[i];
		if (dimm->nr_pages == 0)
			continue;
		edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev));
		device_unregister(&dimm->dev);
	}
}
1104

1105 1106
/* Remove the mc<N> device itself, its private bus and the bus name */
void edac_unregister_sysfs(struct mem_ctl_info *mci)
{
	edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
	device_unregister(&mci->dev);
	bus_unregister(mci->bus);
	kfree(mci->bus->name);
}
1112

1113
/* Release callback for the bare "mc" parent device */
static void mc_attr_release(struct device *dev)
{
	/*
	 * There's no container structure here, as this is just the mci
	 * parent device, used to create the /sys/devices/mc sysfs node.
	 * So, there are no attributes on it.
	 */
	edac_dbg(1, "Releasing device %s\n", dev_name(dev));
	kfree(dev);
}

static struct device_type mc_attr_type = {
	.release	= mc_attr_release,
};
1127
/*
 * Init/exit code for the module. Basically, creates/removes /sys/class/rc
 */

/* Create the "mc" parent device under the edac subsystem */
int __init edac_mc_sysfs_init(void)
{
	struct bus_type *edac_subsys;
	int err;

	/* get the /sys/devices/system/edac subsys reference */
	edac_subsys = edac_get_sysfs_subsys();
	if (edac_subsys == NULL) {
		edac_dbg(1, "no edac_subsys\n");
		err = -EINVAL;
		goto out;
	}

	mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL);
	if (!mci_pdev) {
		err = -ENOMEM;
		goto out_put_sysfs;
	}

	mci_pdev->bus = edac_subsys;
	mci_pdev->type = &mc_attr_type;
	device_initialize(mci_pdev);
	dev_set_name(mci_pdev, "mc");

	err = device_add(mci_pdev);
	if (err < 0)
		goto out_dev_free;

	edac_dbg(0, "device %s created\n", dev_name(mci_pdev));

	return 0;

 out_dev_free:
	kfree(mci_pdev);
 out_put_sysfs:
	edac_put_sysfs_subsys();
 out:
	return err;
}

1170
/* Tear down the "mc" parent device and drop the edac subsys reference */
void __exit edac_mc_sysfs_exit(void)
{
	device_unregister(mci_pdev);
	edac_put_sysfs_subsys();
}