/*
 * edac_mc kernel module
 * (C) 2005-2007 Linux Networx (http://lnxi.com)
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
 *
 * (c) 2012-2013 - Mauro Carvalho Chehab <mchehab@redhat.com>
 *	The entire API was re-written, and ported to use struct device
 *
 */

#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/bug.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>

#include "edac_core.h"
#include "edac_module.h"

/* MC EDAC Controls, settable by module parameter, and sysfs */
static int edac_mc_log_ue = 1;
static int edac_mc_log_ce = 1;
static int edac_mc_panic_on_ue;
static int edac_mc_poll_msec = 1000;

/* Getter functions for above */
int edac_mc_get_log_ue(void)
{
	return edac_mc_log_ue;
}

int edac_mc_get_log_ce(void)
{
	return edac_mc_log_ce;
}

int edac_mc_get_panic_on_ue(void)
{
	return edac_mc_panic_on_ue;
}

/* this is temporary */
int edac_mc_get_poll_msec(void)
{
	return edac_mc_poll_msec;
}

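/*
 * Setter for the edac_mc_poll_msec module parameter: validate the new
 * value and ask the edac_mc engine to switch its polling workqueue to the
 * new period.
 */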
static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
{
	long l;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtol(val, 0, &l);
	if (ret)
		return ret;

	if (!l || ((int)l != l))
		return -EINVAL;
	*((int *)kp->arg) = l;

	/* notify edac_mc engine to reset the poll period */
	edac_mc_reset_delay_period(l);

	return 0;
}

/* Parameter declarations for above */
module_param(edac_mc_panic_on_ue, int, 0644);
MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
module_param(edac_mc_log_ue, int, 0644);
MODULE_PARM_DESC(edac_mc_log_ue,
		 "Log uncorrectable error to console: 0=off 1=on");
module_param(edac_mc_log_ce, int, 0644);
MODULE_PARM_DESC(edac_mc_log_ce,
		 "Log correctable error to console: 0=off 1=on");
module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
		  &edac_mc_poll_msec, 0644);
MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");

static struct device *mci_pdev;

/*
 * various constants for Memory Controllers
 */
static const char * const mem_types[] = {
	[MEM_EMPTY] = "Empty",
	[MEM_RESERVED] = "Reserved",
	[MEM_UNKNOWN] = "Unknown",
	[MEM_FPM] = "FPM",
	[MEM_EDO] = "EDO",
	[MEM_BEDO] = "BEDO",
	[MEM_SDR] = "Unbuffered-SDR",
	[MEM_RDR] = "Registered-SDR",
	[MEM_DDR] = "Unbuffered-DDR",
	[MEM_RDDR] = "Registered-DDR",
	[MEM_RMBS] = "RMBS",
	[MEM_DDR2] = "Unbuffered-DDR2",
	[MEM_FB_DDR2] = "FullyBuffered-DDR2",
	[MEM_RDDR2] = "Registered-DDR2",
	[MEM_XDR] = "XDR",
	[MEM_DDR3] = "Unbuffered-DDR3",
	[MEM_RDDR3] = "Registered-DDR3"
};

static const char * const dev_types[] = {
	[DEV_UNKNOWN] = "Unknown",
	[DEV_X1] = "x1",
	[DEV_X2] = "x2",
	[DEV_X4] = "x4",
	[DEV_X8] = "x8",
	[DEV_X16] = "x16",
	[DEV_X32] = "x32",
	[DEV_X64] = "x64"
};

static const char * const edac_caps[] = {
	[EDAC_UNKNOWN] = "Unknown",
	[EDAC_NONE] = "None",
	[EDAC_RESERVED] = "Reserved",
	[EDAC_PARITY] = "PARITY",
	[EDAC_EC] = "EC",
	[EDAC_SECDED] = "SECDED",
	[EDAC_S2ECD2ED] = "S2ECD2ED",
	[EDAC_S4ECD4ED] = "S4ECD4ED",
	[EDAC_S8ECD8ED] = "S8ECD8ED",
	[EDAC_S16ECD16ED] = "S16ECD16ED"
};

#ifdef CONFIG_EDAC_LEGACY_SYSFS
/*
 * EDAC sysfs CSROW data structures and methods
 */

#define to_csrow(k) container_of(k, struct csrow_info, dev)

/*
 * We need it to avoid namespace conflicts between the legacy API
 * and the per-dimm/per-rank one
 */
#define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
	static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)

struct dev_ch_attribute {
	struct device_attribute attr;
	int channel;
};

#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
	struct dev_ch_attribute dev_attr_legacy_##_name = \
		{ __ATTR(_name, _mode, _show, _store), (_var) }

#define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel)
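
/*
 * Each DEVICE_CHANNEL attribute carries its channel number, so a single
 * show/store routine can serve all of the chX_* files by looking the
 * channel up with to_channel() on the attribute it was invoked for.
 */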

/* Set of more default csrow<id> attribute show/store functions */
static ssize_t csrow_ue_count_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%u\n", csrow->ue_count);
}

static ssize_t csrow_ce_count_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%u\n", csrow->ce_count);
}

static ssize_t csrow_size_show(struct device *dev,
			       struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);
	int i;
	u32 nr_pages = 0;

	for (i = 0; i < csrow->nr_channels; i++)
		nr_pages += csrow->channels[i]->dimm->nr_pages;
	return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
}

static ssize_t csrow_mem_type_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%s\n", mem_types[csrow->channels[0]->dimm->mtype]);
}

static ssize_t csrow_dev_type_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
}

static ssize_t csrow_edac_mode_show(struct device *dev,
				    struct device_attribute *mattr,
				    char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
}

/* show/store functions for DIMM Label attributes */
static ssize_t channel_dimm_label_show(struct device *dev,
				       struct device_attribute *mattr,
				       char *data)
{
	struct csrow_info *csrow = to_csrow(dev);
	unsigned chan = to_channel(mattr);
	struct rank_info *rank = csrow->channels[chan];

	/* if field has not been initialized, there is nothing to send */
	if (!rank->dimm->label[0])
		return 0;

	return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
			rank->dimm->label);
}

static ssize_t channel_dimm_label_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct csrow_info *csrow = to_csrow(dev);
	unsigned chan = to_channel(mattr);
	struct rank_info *rank = csrow->channels[chan];

	ssize_t max_size = 0;

	max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
	strncpy(rank->dimm->label, data, max_size);
	rank->dimm->label[max_size] = '\0';

	return max_size;
}

/* show function for dynamic chX_ce_count attribute */
static ssize_t channel_ce_count_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);
	unsigned chan = to_channel(mattr);
	struct rank_info *rank = csrow->channels[chan];

	return sprintf(data, "%u\n", rank->ce_count);
}

/* csrow<id>/attribute files */
DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL);
DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL);
DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL);
DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL);
DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL);
DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL);

/* default attributes of the CSROW<id> object */
static struct attribute *csrow_attrs[] = {
	&dev_attr_legacy_dev_type.attr,
	&dev_attr_legacy_mem_type.attr,
	&dev_attr_legacy_edac_mode.attr,
	&dev_attr_legacy_size_mb.attr,
	&dev_attr_legacy_ue_count.attr,
	&dev_attr_legacy_ce_count.attr,
	NULL,
};

static struct attribute_group csrow_attr_grp = {
	.attrs	= csrow_attrs,
};

static const struct attribute_group *csrow_attr_groups[] = {
	&csrow_attr_grp,
	NULL
};

static void csrow_attr_release(struct device *dev)
{
	struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);

	edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
	kfree(csrow);
}

static struct device_type csrow_attr_type = {
	.groups		= csrow_attr_groups,
	.release	= csrow_attr_release,
};

/*
 * possible dynamic channel DIMM Label attribute files
 *
 */

#define EDAC_NR_CHANNELS	6

DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 0);
DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 1);
DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 2);
DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 3);
DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 4);
DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 5);

/* Total possible dynamic DIMM Label attribute file table */
static struct device_attribute *dynamic_csrow_dimm_attr[] = {
	&dev_attr_legacy_ch0_dimm_label.attr,
	&dev_attr_legacy_ch1_dimm_label.attr,
	&dev_attr_legacy_ch2_dimm_label.attr,
	&dev_attr_legacy_ch3_dimm_label.attr,
	&dev_attr_legacy_ch4_dimm_label.attr,
	&dev_attr_legacy_ch5_dimm_label.attr
};

/* possible dynamic channel ce_count attribute files */
DEVICE_CHANNEL(ch0_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 0);
DEVICE_CHANNEL(ch1_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 1);
DEVICE_CHANNEL(ch2_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 2);
DEVICE_CHANNEL(ch3_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 3);
DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 4);
DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 5);

/* Total possible dynamic ce_count attribute file table */
static struct device_attribute *dynamic_csrow_ce_count_attr[] = {
	&dev_attr_legacy_ch0_ce_count.attr,
	&dev_attr_legacy_ch1_ce_count.attr,
	&dev_attr_legacy_ch2_ce_count.attr,
	&dev_attr_legacy_ch3_ce_count.attr,
	&dev_attr_legacy_ch4_ce_count.attr,
	&dev_attr_legacy_ch5_ce_count.attr
};

static inline int nr_pages_per_csrow(struct csrow_info *csrow)
{
	int chan, nr_pages = 0;

	for (chan = 0; chan < csrow->nr_channels; chan++)
		nr_pages += csrow->channels[chan]->dimm->nr_pages;

	return nr_pages;
}

/* Create a CSROW object under specified edac_mc_device */
static int edac_create_csrow_object(struct mem_ctl_info *mci,
				    struct csrow_info *csrow, int index)
{
	int err, chan;

	if (csrow->nr_channels >= EDAC_NR_CHANNELS)
		return -ENODEV;

	csrow->dev.type = &csrow_attr_type;
	csrow->dev.bus = mci->bus;
	device_initialize(&csrow->dev);
	csrow->dev.parent = &mci->dev;
	csrow->mci = mci;
	dev_set_name(&csrow->dev, "csrow%d", index);
	dev_set_drvdata(&csrow->dev, csrow);

	edac_dbg(0, "creating (virtual) csrow node %s\n",
		 dev_name(&csrow->dev));

	err = device_add(&csrow->dev);
	if (err < 0)
		return err;

	for (chan = 0; chan < csrow->nr_channels; chan++) {
		/* Only expose populated DIMMs */
		if (!csrow->channels[chan]->dimm->nr_pages)
			continue;
		err = device_create_file(&csrow->dev,
					 dynamic_csrow_dimm_attr[chan]);
		if (err < 0)
			goto error;
		err = device_create_file(&csrow->dev,
					 dynamic_csrow_ce_count_attr[chan]);
		if (err < 0) {
			device_remove_file(&csrow->dev,
					   dynamic_csrow_dimm_attr[chan]);
			goto error;
		}
	}

	return 0;

error:
	for (--chan; chan >= 0; chan--) {
		device_remove_file(&csrow->dev,
					dynamic_csrow_dimm_attr[chan]);
		device_remove_file(&csrow->dev,
					   dynamic_csrow_ce_count_attr[chan]);
	}
	put_device(&csrow->dev);

	return err;
}

/* Create all CSROW objects under the specified edac_mc_device */
static int edac_create_csrow_objects(struct mem_ctl_info *mci)
{
	int err, i, chan;
	struct csrow_info *csrow;

	for (i = 0; i < mci->nr_csrows; i++) {
		csrow = mci->csrows[i];
		if (!nr_pages_per_csrow(csrow))
			continue;
		err = edac_create_csrow_object(mci, mci->csrows[i], i);
		if (err < 0) {
			edac_dbg(1,
				 "failure: create csrow objects for csrow %d\n",
				 i);
			goto error;
		}
	}
	return 0;

error:
	for (--i; i >= 0; i--) {
		csrow = mci->csrows[i];
		if (!nr_pages_per_csrow(csrow))
			continue;
		for (chan = csrow->nr_channels - 1; chan >= 0; chan--) {
			if (!csrow->channels[chan]->dimm->nr_pages)
				continue;
			device_remove_file(&csrow->dev,
						dynamic_csrow_dimm_attr[chan]);
			device_remove_file(&csrow->dev,
						dynamic_csrow_ce_count_attr[chan]);
		}
		put_device(&mci->csrows[i]->dev);
	}

	return err;
}

static void edac_delete_csrow_objects(struct mem_ctl_info *mci)
{
	int i, chan;
	struct csrow_info *csrow;

	for (i = mci->nr_csrows - 1; i >= 0; i--) {
		csrow = mci->csrows[i];
		if (!nr_pages_per_csrow(csrow))
			continue;
		for (chan = csrow->nr_channels - 1; chan >= 0; chan--) {
			if (!csrow->channels[chan]->dimm->nr_pages)
				continue;
			edac_dbg(1, "Removing csrow %d channel %d sysfs nodes\n",
				 i, chan);
			device_remove_file(&csrow->dev,
						dynamic_csrow_dimm_attr[chan]);
			device_remove_file(&csrow->dev,
						dynamic_csrow_ce_count_attr[chan]);
		}
		device_unregister(&mci->csrows[i]->dev);
	}
}
#endif

/*
 * Per-dimm (or per-rank) devices
 */

#define to_dimm(k) container_of(k, struct dimm_info, dev)

/* show/store functions for DIMM Label attributes */
static ssize_t dimmdev_location_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return edac_dimm_info_location(dimm, data, PAGE_SIZE);
}

static ssize_t dimmdev_label_show(struct device *dev,
				  struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	/* if field has not been initialized, there is nothing to send */
	if (!dimm->label[0])
		return 0;

	return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", dimm->label);
}

static ssize_t dimmdev_label_store(struct device *dev,
				   struct device_attribute *mattr,
				   const char *data,
				   size_t count)
{
	struct dimm_info *dimm = to_dimm(dev);

	ssize_t max_size = 0;

	max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
	strncpy(dimm->label, data, max_size);
	dimm->label[max_size] = '\0';

	return max_size;
}

static ssize_t dimmdev_size_show(struct device *dev,
				 struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages));
}

static ssize_t dimmdev_mem_type_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%s\n", mem_types[dimm->mtype]);
}

static ssize_t dimmdev_dev_type_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%s\n", dev_types[dimm->dtype]);
}

static ssize_t dimmdev_edac_mode_show(struct device *dev,
				      struct device_attribute *mattr,
				      char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]);
}

/* dimm/rank attribute files */
static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR,
		   dimmdev_label_show, dimmdev_label_store);
static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL);
static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL);
static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL);
static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL);
static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL);

/* attributes of the dimm<id>/rank<id> object */
static struct attribute *dimm_attrs[] = {
	&dev_attr_dimm_label.attr,
	&dev_attr_dimm_location.attr,
	&dev_attr_size.attr,
	&dev_attr_dimm_mem_type.attr,
	&dev_attr_dimm_dev_type.attr,
	&dev_attr_dimm_edac_mode.attr,
	NULL,
};

static struct attribute_group dimm_attr_grp = {
	.attrs	= dimm_attrs,
};

static const struct attribute_group *dimm_attr_groups[] = {
	&dimm_attr_grp,
	NULL
};

static void dimm_attr_release(struct device *dev)
{
	struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);

	edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev));
	kfree(dimm);
}

static struct device_type dimm_attr_type = {
	.groups		= dimm_attr_groups,
	.release	= dimm_attr_release,
};

/* Create a DIMM object under specified memory controller device */
static int edac_create_dimm_object(struct mem_ctl_info *mci,
				   struct dimm_info *dimm,
				   int index)
{
	int err;
	dimm->mci = mci;

	dimm->dev.type = &dimm_attr_type;
	dimm->dev.bus = mci->bus;
	device_initialize(&dimm->dev);

	dimm->dev.parent = &mci->dev;
	if (mci->csbased)
		dev_set_name(&dimm->dev, "rank%d", index);
	else
		dev_set_name(&dimm->dev, "dimm%d", index);
	dev_set_drvdata(&dimm->dev, dimm);
	pm_runtime_forbid(&mci->dev);

	err =  device_add(&dimm->dev);

	edac_dbg(0, "creating rank/dimm device %s\n", dev_name(&dimm->dev));

	return err;
}

/*
 * Memory controller device
 */

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

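/*
 * reset_counters: clear every error count the core keeps for this MC
 * (totals, no-info counts, per-csrow/channel and per-layer counters) and
 * restart the seconds_since_reset clock.
 */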
static ssize_t mci_reset_counters_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int cnt, row, chan, i;
	mci->ue_mc = 0;
	mci->ce_mc = 0;
	mci->ue_noinfo_count = 0;
	mci->ce_noinfo_count = 0;

	for (row = 0; row < mci->nr_csrows; row++) {
		struct csrow_info *ri = mci->csrows[row];

		ri->ue_count = 0;
		ri->ce_count = 0;

		for (chan = 0; chan < ri->nr_channels; chan++)
			ri->channels[chan]->ce_count = 0;
	}

	cnt = 1;
	for (i = 0; i < mci->n_layers; i++) {
		cnt *= mci->layers[i].size;
		memset(mci->ce_per_layer[i], 0, cnt * sizeof(u32));
		memset(mci->ue_per_layer[i], 0, cnt * sizeof(u32));
	}

	mci->start_time = jiffies;
	return count;
}

/* Memory scrubbing interface:
 *
 * A MC driver can limit the scrubbing bandwidth based on the CPU type.
 * Therefore, ->set_sdram_scrub_rate should be made to return the actual
 * bandwidth that is accepted or 0 when scrubbing is to be disabled.
 *
 * Negative value still means that an error has occurred while setting
 * the scrub rate.
 */
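/*
 * Illustrative sketch only (not from this file): a driver callback honouring
 * the contract above could look roughly like this, with the foo_* names
 * being hypothetical driver helpers:
 *
 *	static int foo_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 bw)
 *	{
 *		if (bw == 0)
 *			return foo_disable_scrub(mci);	(disabled: return 0)
 *		if (bw > FOO_MAX_SCRUB_BW)
 *			bw = FOO_MAX_SCRUB_BW;		(clamp to what the hw accepts)
 *		return foo_program_scrub(mci, bw);	(actual rate, or < 0 on error)
 *	}
 */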
static ssize_t mci_sdram_scrub_rate_store(struct device *dev,
					  struct device_attribute *mattr,
					  const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	unsigned long bandwidth = 0;
	int new_bw = 0;

	if (kstrtoul(data, 10, &bandwidth) < 0)
		return -EINVAL;

	new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
	if (new_bw < 0) {
		edac_printk(KERN_WARNING, EDAC_MC,
			    "Error setting scrub rate to: %lu\n", bandwidth);
		return -EINVAL;
	}

	return count;
}

/*
 * ->get_sdram_scrub_rate() return value semantics same as above.
 */
static ssize_t mci_sdram_scrub_rate_show(struct device *dev,
					 struct device_attribute *mattr,
					 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int bandwidth = 0;

	bandwidth = mci->get_sdram_scrub_rate(mci);
	if (bandwidth < 0) {
		edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
		return bandwidth;
	}

	return sprintf(data, "%d\n", bandwidth);
}

/* default attribute files for the MCI object */
static ssize_t mci_ue_count_show(struct device *dev,
				 struct device_attribute *mattr,
				 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%d\n", mci->ue_mc);
}

static ssize_t mci_ce_count_show(struct device *dev,
				 struct device_attribute *mattr,
				 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%d\n", mci->ce_mc);
}

static ssize_t mci_ce_noinfo_show(struct device *dev,
				  struct device_attribute *mattr,
				  char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%d\n", mci->ce_noinfo_count);
}

static ssize_t mci_ue_noinfo_show(struct device *dev,
				  struct device_attribute *mattr,
				  char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%d\n", mci->ue_noinfo_count);
}

static ssize_t mci_seconds_show(struct device *dev,
				struct device_attribute *mattr,
				char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
}

static ssize_t mci_ctl_name_show(struct device *dev,
				 struct device_attribute *mattr,
				 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%s\n", mci->ctl_name);
}

static ssize_t mci_size_mb_show(struct device *dev,
				struct device_attribute *mattr,
				char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int total_pages = 0, csrow_idx, j;

	for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
		struct csrow_info *csrow = mci->csrows[csrow_idx];

		for (j = 0; j < csrow->nr_channels; j++) {
			struct dimm_info *dimm = csrow->channels[j]->dimm;

			total_pages += dimm->nr_pages;
		}
	}

	return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
}

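/*
 * max_location: one "<layer name> <max index>" pair per configured layer,
 * i.e. the highest possible location coordinates for this controller.
 */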
static ssize_t mci_max_location_show(struct device *dev,
				     struct device_attribute *mattr,
				     char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int i;
	char *p = data;

	for (i = 0; i < mci->n_layers; i++) {
		p += sprintf(p, "%s %d ",
			     edac_layer_name[mci->layers[i].type],
			     mci->layers[i].size - 1);
	}

	return p - data;
}

#ifdef CONFIG_EDAC_DEBUG
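/*
 * Fake error injection, exposed through debugfs (see
 * edac_create_debug_nodes() below): the fake_inject_<layer>, fake_inject_ue
 * and fake_inject_count knobs select the target location, severity and
 * count, and a write to the fake_inject file feeds the result into
 * edac_mc_handle_error().  This exercises only the core error handling,
 * not any driver-specific decoding.
 *
 * Typical use (assuming debugfs is mounted at /sys/kernel/debug):
 *	echo 2 > /sys/kernel/debug/edac/mc0/fake_inject_count
 *	echo 1 > /sys/kernel/debug/edac/mc0/fake_inject
 */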
static ssize_t edac_fake_inject_write(struct file *file,
				      const char __user *data,
				      size_t count, loff_t *ppos)
{
	struct device *dev = file->private_data;
	struct mem_ctl_info *mci = to_mci(dev);
	static enum hw_event_mc_err_type type;
	u16 errcount = mci->fake_inject_count;

	if (!errcount)
		errcount = 1;

	type = mci->fake_inject_ue ? HW_EVENT_ERR_UNCORRECTED
				   : HW_EVENT_ERR_CORRECTED;

	printk(KERN_DEBUG
	       "Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n",
		errcount,
		(type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE",
		errcount > 1 ? "s" : "",
		mci->fake_inject_layer[0],
		mci->fake_inject_layer[1],
		mci->fake_inject_layer[2]
	       );
	edac_mc_handle_error(type, mci, errcount, 0, 0, 0,
			     mci->fake_inject_layer[0],
			     mci->fake_inject_layer[1],
			     mci->fake_inject_layer[2],
			     "FAKE ERROR", "for EDAC testing only");

	return count;
}

static const struct file_operations debug_fake_inject_fops = {
	.open = simple_open,
	.write = edac_fake_inject_write,
	.llseek = generic_file_llseek,
};
#endif

/* default Control file */
DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);

/* default Attribute files */
DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);

/* memory scrubber attribute file */
DEVICE_ATTR(sdram_scrub_rate, 0, NULL, NULL);

static struct attribute *mci_attrs[] = {
	&dev_attr_reset_counters.attr,
	&dev_attr_mc_name.attr,
	&dev_attr_size_mb.attr,
	&dev_attr_seconds_since_reset.attr,
	&dev_attr_ue_noinfo_count.attr,
	&dev_attr_ce_noinfo_count.attr,
	&dev_attr_ue_count.attr,
	&dev_attr_ce_count.attr,
	&dev_attr_max_location.attr,
	NULL
};

static struct attribute_group mci_attr_grp = {
	.attrs	= mci_attrs,
};

static const struct attribute_group *mci_attr_groups[] = {
	&mci_attr_grp,
	NULL
};

static void mci_attr_release(struct device *dev)
{
	struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);

	edac_dbg(1, "Releasing mci device %s\n", dev_name(dev));
	kfree(mci);
}

static struct device_type mci_attr_type = {
	.groups		= mci_attr_groups,
	.release	= mci_attr_release,
};

#ifdef CONFIG_EDAC_DEBUG
static struct dentry *edac_debugfs;

int __init edac_debugfs_init(void)
{
	edac_debugfs = debugfs_create_dir("edac", NULL);
	if (IS_ERR(edac_debugfs)) {
		edac_debugfs = NULL;
		return -ENOMEM;
	}
	return 0;
}

void __exit edac_debugfs_exit(void)
{
	debugfs_remove(edac_debugfs);
}

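
/*
 * Create the per-controller debugfs directory (named after the mc<N>
 * device) holding the fake_inject_* control files described above.
 */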
static int edac_create_debug_nodes(struct mem_ctl_info *mci)
{
	struct dentry *d, *parent;
	char name[80];
	int i;

	if (!edac_debugfs)
		return -ENODEV;

	d = debugfs_create_dir(mci->dev.kobj.name, edac_debugfs);
	if (!d)
		return -ENOMEM;
	parent = d;

	for (i = 0; i < mci->n_layers; i++) {
		sprintf(name, "fake_inject_%s",
			     edac_layer_name[mci->layers[i].type]);
		d = debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent,
				      &mci->fake_inject_layer[i]);
		if (!d)
			goto nomem;
	}

	d = debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent,
				&mci->fake_inject_ue);
	if (!d)
		goto nomem;

	d = debugfs_create_u16("fake_inject_count", S_IRUGO | S_IWUSR, parent,
				&mci->fake_inject_count);
	if (!d)
		goto nomem;

	d = debugfs_create_file("fake_inject", S_IWUSR, parent,
				&mci->dev,
				&debug_fake_inject_fops);
	if (!d)
		goto nomem;

	mci->debugfs = parent;
	return 0;
nomem:
	debugfs_remove(mci->debugfs);
	return -ENOMEM;
}
#endif

/*
 * Create a new Memory Controller kobject instance,
 *	mc<id> under the 'mc' directory
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
{
	int i, err;

	/*
	 * The memory controller needs its own bus, in order to avoid
	 * namespace conflicts at /sys/bus/edac.
	 */
	mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
	if (!mci->bus->name)
		return -ENOMEM;

	edac_dbg(0, "creating bus %s\n", mci->bus->name);

	err = bus_register(mci->bus);
	if (err < 0)
		return err;

	/* set up the mc<id> device below the 'mc' parent device */
	mci->dev.type = &mci_attr_type;
	device_initialize(&mci->dev);

	mci->dev.parent = mci_pdev;
	mci->dev.bus = mci->bus;
	dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
	dev_set_drvdata(&mci->dev, mci);
	pm_runtime_forbid(&mci->dev);

	edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
	err = device_add(&mci->dev);
	if (err < 0) {
		edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
		bus_unregister(mci->bus);
		kfree(mci->bus->name);
		return err;
	}

	if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
		if (mci->get_sdram_scrub_rate) {
			dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
			dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
		}
		if (mci->set_sdram_scrub_rate) {
			dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
			dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
		}
		err = device_create_file(&mci->dev,
					 &dev_attr_sdram_scrub_rate);
		if (err) {
			edac_dbg(1, "failure: create sdram_scrub_rate\n");
			goto fail2;
		}
	}
	/*
	 * Create the dimm/rank devices
	 */
	for (i = 0; i < mci->tot_dimms; i++) {
		struct dimm_info *dimm = mci->dimms[i];
		/* Only expose populated DIMMs */
		if (dimm->nr_pages == 0)
			continue;
#ifdef CONFIG_EDAC_DEBUG
		edac_dbg(1, "creating dimm%d, located at ", i);
		if (edac_debug_level >= 1) {
			int lay;
			for (lay = 0; lay < mci->n_layers; lay++)
				printk(KERN_CONT "%s %d ",
					edac_layer_name[mci->layers[lay].type],
					dimm->location[lay]);
			printk(KERN_CONT "\n");
		}
#endif
		err = edac_create_dimm_object(mci, dimm, i);
		if (err) {
			edac_dbg(1, "failure: create dimm %d obj\n", i);
			goto fail;
		}
	}

#ifdef CONFIG_EDAC_LEGACY_SYSFS
	err = edac_create_csrow_objects(mci);
	if (err < 0)
		goto fail;
#endif

#ifdef CONFIG_EDAC_DEBUG
	edac_create_debug_nodes(mci);
#endif
	return 0;

fail:
	for (i--; i >= 0; i--) {
		struct dimm_info *dimm = mci->dimms[i];
		if (dimm->nr_pages == 0)
			continue;
		device_unregister(&dimm->dev);
	}
fail2:
	device_unregister(&mci->dev);
	bus_unregister(mci->bus);
	kfree(mci->bus->name);
	return err;
}

/*
 * remove a Memory Controller instance
 */
void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
{
	int i;

	edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
	debugfs_remove(mci->debugfs);
#endif
#ifdef CONFIG_EDAC_LEGACY_SYSFS
	edac_delete_csrow_objects(mci);
#endif

	for (i = 0; i < mci->tot_dimms; i++) {
		struct dimm_info *dimm = mci->dimms[i];
		if (dimm->nr_pages == 0)
			continue;
		edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev));
		device_unregister(&dimm->dev);
	}
}

void edac_unregister_sysfs(struct mem_ctl_info *mci)
{
	edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
	device_unregister(&mci->dev);
	bus_unregister(mci->bus);
	kfree(mci->bus->name);
}

static void mc_attr_release(struct device *dev)
{
	/*
	 * There's no container structure here, as this is just the mci
	 * parent device, used to create the /sys/devices/mc sysfs node.
	 * So, there are no attributes on it.
	 */
	edac_dbg(1, "Releasing device %s\n", dev_name(dev));
	kfree(dev);
}

static struct device_type mc_attr_type = {
	.release	= mc_attr_release,
};
/*
 * Init/exit code for the module. Basically, creates/removes the 'mc'
 * parent device under the edac subsystem.
 */
int __init edac_mc_sysfs_init(void)
{
	struct bus_type *edac_subsys;
	int err;

	/* get the /sys/devices/system/edac subsys reference */
	edac_subsys = edac_get_sysfs_subsys();
	if (edac_subsys == NULL) {
		edac_dbg(1, "no edac_subsys\n");
		err = -EINVAL;
		goto out;
	}

	mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL);
	if (!mci_pdev) {
		err = -ENOMEM;
		goto out_put_sysfs;
	}

	mci_pdev->bus = edac_subsys;
	mci_pdev->type = &mc_attr_type;
	device_initialize(mci_pdev);
	dev_set_name(mci_pdev, "mc");

	err = device_add(mci_pdev);
	if (err < 0)
		goto out_dev_free;

	edac_dbg(0, "device %s created\n", dev_name(mci_pdev));

	return 0;

 out_dev_free:
	kfree(mci_pdev);
 out_put_sysfs:
	edac_put_sysfs_subsys();
 out:
	return err;
}

void __exit edac_mc_sysfs_exit(void)
{
	device_unregister(mci_pdev);
	edac_put_sysfs_subsys();
}