/*
 * edac_mc kernel module
 * (C) 2005-2007 Linux Networx (http://lnxi.com)
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
 *
 * (c) 2012-2013 - Mauro Carvalho Chehab <mchehab@redhat.com>
 *	The entire API was re-written and ported to use struct device
 *
 */

#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/bug.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>

#include "edac_core.h"
#include "edac_module.h"

/* MC EDAC Controls, settable by module parameter and sysfs */
static int edac_mc_log_ue = 1;
static int edac_mc_log_ce = 1;
static int edac_mc_panic_on_ue;
static int edac_mc_poll_msec = 1000;

/* Getter functions for above */
int edac_mc_get_log_ue(void)
{
	return edac_mc_log_ue;
}

int edac_mc_get_log_ce(void)
{
	return edac_mc_log_ce;
}

int edac_mc_get_panic_on_ue(void)
{
	return edac_mc_panic_on_ue;
}

/* this is temporary */
int edac_mc_get_poll_msec(void)
{
	return edac_mc_poll_msec;
}

static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
{
	long l;
	int ret;

	if (!val)
		return -EINVAL;

	ret = strict_strtol(val, 0, &l);
	if (ret == -EINVAL || ((int)l != l))
		return -EINVAL;
	*((int *)kp->arg) = l;

	/* notify edac_mc engine to reset the poll period */
	edac_mc_reset_delay_period(l);

	return 0;
}

/* Parameter declarations for above */
module_param(edac_mc_panic_on_ue, int, 0644);
MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
module_param(edac_mc_log_ue, int, 0644);
MODULE_PARM_DESC(edac_mc_log_ue,
		 "Log uncorrectable error to console: 0=off 1=on");
module_param(edac_mc_log_ce, int, 0644);
MODULE_PARM_DESC(edac_mc_log_ce,
		 "Log correctable error to console: 0=off 1=on");
module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
		  &edac_mc_poll_msec, 0644);
MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
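
/*
 * Usage sketch (illustrative, not part of the driver): when this file is
 * built into the edac_core module, the knobs above show up under
 * /sys/module/edac_core/parameters/.  For example, from a root shell:
 *
 *	echo 1 > /sys/module/edac_core/parameters/edac_mc_panic_on_ue
 *	echo 5000 > /sys/module/edac_core/parameters/edac_mc_poll_msec
 *
 * Writing edac_mc_poll_msec goes through edac_set_poll_msec() above, which
 * also asks the edac_mc core to reschedule its polling work with the new
 * period.  The module name is an assumption about how the EDAC core is
 * packaged; adjust the path if it is built under a different name.
 */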

static struct device *mci_pdev;

/*
 * various constants for Memory Controllers
 */
static const char *mem_types[] = {
	[MEM_EMPTY] = "Empty",
	[MEM_RESERVED] = "Reserved",
	[MEM_UNKNOWN] = "Unknown",
	[MEM_FPM] = "FPM",
	[MEM_EDO] = "EDO",
	[MEM_BEDO] = "BEDO",
	[MEM_SDR] = "Unbuffered-SDR",
	[MEM_RDR] = "Registered-SDR",
	[MEM_DDR] = "Unbuffered-DDR",
	[MEM_RDDR] = "Registered-DDR",
	[MEM_RMBS] = "RMBS",
	[MEM_DDR2] = "Unbuffered-DDR2",
	[MEM_FB_DDR2] = "FullyBuffered-DDR2",
	[MEM_RDDR2] = "Registered-DDR2",
	[MEM_XDR] = "XDR",
	[MEM_DDR3] = "Unbuffered-DDR3",
	[MEM_RDDR3] = "Registered-DDR3"
};

static const char *dev_types[] = {
	[DEV_UNKNOWN] = "Unknown",
	[DEV_X1] = "x1",
	[DEV_X2] = "x2",
	[DEV_X4] = "x4",
	[DEV_X8] = "x8",
	[DEV_X16] = "x16",
	[DEV_X32] = "x32",
	[DEV_X64] = "x64"
};

static const char *edac_caps[] = {
	[EDAC_UNKNOWN] = "Unknown",
	[EDAC_NONE] = "None",
	[EDAC_RESERVED] = "Reserved",
	[EDAC_PARITY] = "PARITY",
	[EDAC_EC] = "EC",
	[EDAC_SECDED] = "SECDED",
	[EDAC_S2ECD2ED] = "S2ECD2ED",
	[EDAC_S4ECD4ED] = "S4ECD4ED",
	[EDAC_S8ECD8ED] = "S8ECD8ED",
	[EDAC_S16ECD16ED] = "S16ECD16ED"
};

#ifdef CONFIG_EDAC_LEGACY_SYSFS
/*
 * EDAC sysfs CSROW data structures and methods
 */

#define to_csrow(k) container_of(k, struct csrow_info, dev)

/*
 * We need it to avoid namespace conflicts between the legacy API
 * and the per-dimm/per-rank one
 */
#define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
	struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)

struct dev_ch_attribute {
	struct device_attribute attr;
	int channel;
};

#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
	struct dev_ch_attribute dev_attr_legacy_##_name = \
		{ __ATTR(_name, _mode, _show, _store), (_var) }

#define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel)
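
/*
 * Illustration (comment only, nothing new is compiled here): an invocation
 * such as DEVICE_CHANNEL(ch0_dimm_label, ..., 0) further down declares a
 * struct dev_ch_attribute named dev_attr_legacy_ch0_dimm_label whose
 * ->channel field is 0.  The show/store handlers only receive the embedded
 * struct device_attribute, so they recover the channel index with
 *
 *	unsigned chan = to_channel(mattr);
 *
 * container_of() walks back from the device_attribute member to the
 * enclosing dev_ch_attribute, which is what lets one show/store pair serve
 * every chX_* file.
 */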

/* Default csrow<id> attribute show/store functions */
static ssize_t csrow_ue_count_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%u\n", csrow->ue_count);
}

static ssize_t csrow_ce_count_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%u\n", csrow->ce_count);
}

static ssize_t csrow_size_show(struct device *dev,
			       struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);
	int i;
	u32 nr_pages = 0;

	if (csrow->mci->csbased)
		return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));

	for (i = 0; i < csrow->nr_channels; i++)
		nr_pages += csrow->channels[i]->dimm->nr_pages;
	return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
}

static ssize_t csrow_mem_type_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%s\n", mem_types[csrow->channels[0]->dimm->mtype]);
}

static ssize_t csrow_dev_type_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
}

static ssize_t csrow_edac_mode_show(struct device *dev,
				    struct device_attribute *mattr,
				    char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
}

/* show/store functions for DIMM Label attributes */
static ssize_t channel_dimm_label_show(struct device *dev,
				       struct device_attribute *mattr,
				       char *data)
{
	struct csrow_info *csrow = to_csrow(dev);
	unsigned chan = to_channel(mattr);
	struct rank_info *rank = csrow->channels[chan];

	/* if field has not been initialized, there is nothing to send */
	if (!rank->dimm->label[0])
		return 0;

	return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
			rank->dimm->label);
}

static ssize_t channel_dimm_label_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct csrow_info *csrow = to_csrow(dev);
	unsigned chan = to_channel(mattr);
	struct rank_info *rank = csrow->channels[chan];

	ssize_t max_size = 0;

	max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
	strncpy(rank->dimm->label, data, max_size);
	rank->dimm->label[max_size] = '\0';

	return max_size;
}

/* show function for dynamic chX_ce_count attribute */
static ssize_t channel_ce_count_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);
	unsigned chan = to_channel(mattr);
	struct rank_info *rank = csrow->channels[chan];

	return sprintf(data, "%u\n", rank->ce_count);
}

/* csrow<id> attribute files */
DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL);
DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL);
DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL);
DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL);
DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL);
DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL);

/* default attributes of the CSROW<id> object */
static struct attribute *csrow_attrs[] = {
	&dev_attr_legacy_dev_type.attr,
	&dev_attr_legacy_mem_type.attr,
	&dev_attr_legacy_edac_mode.attr,
	&dev_attr_legacy_size_mb.attr,
	&dev_attr_legacy_ue_count.attr,
	&dev_attr_legacy_ce_count.attr,
	NULL,
};

static struct attribute_group csrow_attr_grp = {
	.attrs	= csrow_attrs,
};

static const struct attribute_group *csrow_attr_groups[] = {
	&csrow_attr_grp,
	NULL
};

static void csrow_attr_release(struct device *dev)
{
	struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);

	edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
	kfree(csrow);
}

static struct device_type csrow_attr_type = {
	.groups		= csrow_attr_groups,
	.release	= csrow_attr_release,
};

/*
 * possible dynamic channel DIMM Label attribute files
 *
 */

#define EDAC_NR_CHANNELS	6

DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 0);
DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 1);
DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 2);
DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 3);
DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 4);
DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 5);

/* Total possible dynamic DIMM Label attribute file table */
static struct device_attribute *dynamic_csrow_dimm_attr[] = {
	&dev_attr_legacy_ch0_dimm_label.attr,
	&dev_attr_legacy_ch1_dimm_label.attr,
	&dev_attr_legacy_ch2_dimm_label.attr,
	&dev_attr_legacy_ch3_dimm_label.attr,
	&dev_attr_legacy_ch4_dimm_label.attr,
	&dev_attr_legacy_ch5_dimm_label.attr
};

/* possible dynamic channel ce_count attribute files */
DEVICE_CHANNEL(ch0_ce_count, S_IRUGO | S_IWUSR,
		   channel_ce_count_show, NULL, 0);
DEVICE_CHANNEL(ch1_ce_count, S_IRUGO | S_IWUSR,
		   channel_ce_count_show, NULL, 1);
DEVICE_CHANNEL(ch2_ce_count, S_IRUGO | S_IWUSR,
		   channel_ce_count_show, NULL, 2);
DEVICE_CHANNEL(ch3_ce_count, S_IRUGO | S_IWUSR,
		   channel_ce_count_show, NULL, 3);
DEVICE_CHANNEL(ch4_ce_count, S_IRUGO | S_IWUSR,
		   channel_ce_count_show, NULL, 4);
DEVICE_CHANNEL(ch5_ce_count, S_IRUGO | S_IWUSR,
		   channel_ce_count_show, NULL, 5);

/* Total possible dynamic ce_count attribute file table */
static struct device_attribute *dynamic_csrow_ce_count_attr[] = {
	&dev_attr_legacy_ch0_ce_count.attr,
	&dev_attr_legacy_ch1_ce_count.attr,
	&dev_attr_legacy_ch2_ce_count.attr,
	&dev_attr_legacy_ch3_ce_count.attr,
	&dev_attr_legacy_ch4_ce_count.attr,
	&dev_attr_legacy_ch5_ce_count.attr
};

static inline int nr_pages_per_csrow(struct csrow_info *csrow)
{
	int chan, nr_pages = 0;

	for (chan = 0; chan < csrow->nr_channels; chan++)
		nr_pages += csrow->channels[chan]->dimm->nr_pages;

	return nr_pages;
}

/* Create a CSROW object under the specified edac_mc device */
static int edac_create_csrow_object(struct mem_ctl_info *mci,
				    struct csrow_info *csrow, int index)
{
	int err, chan;

	if (csrow->nr_channels >= EDAC_NR_CHANNELS)
		return -ENODEV;

	csrow->dev.type = &csrow_attr_type;
	csrow->dev.bus = &mci->bus;
	device_initialize(&csrow->dev);
	csrow->dev.parent = &mci->dev;
	csrow->mci = mci;
	dev_set_name(&csrow->dev, "csrow%d", index);
	dev_set_drvdata(&csrow->dev, csrow);

	edac_dbg(0, "creating (virtual) csrow node %s\n",
		 dev_name(&csrow->dev));

	err = device_add(&csrow->dev);
	if (err < 0)
		return err;

	for (chan = 0; chan < csrow->nr_channels; chan++) {
		/* Only expose populated DIMMs */
		if (!csrow->channels[chan]->dimm->nr_pages)
			continue;
		err = device_create_file(&csrow->dev,
					 dynamic_csrow_dimm_attr[chan]);
		if (err < 0)
			goto error;
		err = device_create_file(&csrow->dev,
					 dynamic_csrow_ce_count_attr[chan]);
		if (err < 0) {
			device_remove_file(&csrow->dev,
					   dynamic_csrow_dimm_attr[chan]);
			goto error;
		}
	}

	return 0;

error:
	for (--chan; chan >= 0; chan--) {
		device_remove_file(&csrow->dev,
					dynamic_csrow_dimm_attr[chan]);
		device_remove_file(&csrow->dev,
					   dynamic_csrow_ce_count_attr[chan]);
	}
	put_device(&csrow->dev);

	return err;
}

/* Create the CSROW objects under the specified edac_mc device */
static int edac_create_csrow_objects(struct mem_ctl_info *mci)
{
	int err, i, chan;
	struct csrow_info *csrow;

	for (i = 0; i < mci->nr_csrows; i++) {
		csrow = mci->csrows[i];
		if (!nr_pages_per_csrow(csrow))
			continue;
		err = edac_create_csrow_object(mci, mci->csrows[i], i);
		if (err < 0) {
			edac_dbg(1,
				 "failure: create csrow objects for csrow %d\n",
				 i);
			goto error;
		}
	}
	return 0;

error:
	for (--i; i >= 0; i--) {
		csrow = mci->csrows[i];
		if (!nr_pages_per_csrow(csrow))
			continue;
		for (chan = csrow->nr_channels - 1; chan >= 0; chan--) {
			if (!csrow->channels[chan]->dimm->nr_pages)
				continue;
			device_remove_file(&csrow->dev,
						dynamic_csrow_dimm_attr[chan]);
			device_remove_file(&csrow->dev,
						dynamic_csrow_ce_count_attr[chan]);
		}
		put_device(&mci->csrows[i]->dev);
	}

	return err;
}

static void edac_delete_csrow_objects(struct mem_ctl_info *mci)
{
	int i, chan;
	struct csrow_info *csrow;

	for (i = mci->nr_csrows - 1; i >= 0; i--) {
		csrow = mci->csrows[i];
		if (!nr_pages_per_csrow(csrow))
			continue;
		for (chan = csrow->nr_channels - 1; chan >= 0; chan--) {
			if (!csrow->channels[chan]->dimm->nr_pages)
				continue;
			edac_dbg(1, "Removing csrow %d channel %d sysfs nodes\n",
				 i, chan);
			device_remove_file(&csrow->dev,
						dynamic_csrow_dimm_attr[chan]);
			device_remove_file(&csrow->dev,
						dynamic_csrow_ce_count_attr[chan]);
		}
		device_unregister(&mci->csrows[i]->dev);
	}
}
#endif

/*
 * Per-dimm (or per-rank) devices
 */

#define to_dimm(k) container_of(k, struct dimm_info, dev)

/* show/store functions for DIMM Label attributes */
static ssize_t dimmdev_location_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return edac_dimm_info_location(dimm, data, PAGE_SIZE);
}

static ssize_t dimmdev_label_show(struct device *dev,
				  struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	/* if field has not been initialized, there is nothing to send */
	if (!dimm->label[0])
		return 0;

	return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", dimm->label);
}

static ssize_t dimmdev_label_store(struct device *dev,
				   struct device_attribute *mattr,
				   const char *data,
				   size_t count)
{
	struct dimm_info *dimm = to_dimm(dev);

	ssize_t max_size = 0;

	max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
	strncpy(dimm->label, data, max_size);
	dimm->label[max_size] = '\0';

	return max_size;
}

static ssize_t dimmdev_size_show(struct device *dev,
				 struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages));
}

static ssize_t dimmdev_mem_type_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%s\n", mem_types[dimm->mtype]);
}

static ssize_t dimmdev_dev_type_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%s\n", dev_types[dimm->dtype]);
}

static ssize_t dimmdev_edac_mode_show(struct device *dev,
				      struct device_attribute *mattr,
				      char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]);
}

/* dimm/rank attribute files */
static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR,
		   dimmdev_label_show, dimmdev_label_store);
static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL);
static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL);
static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL);
static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL);
static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL);

/* attributes of the dimm<id>/rank<id> object */
static struct attribute *dimm_attrs[] = {
	&dev_attr_dimm_label.attr,
	&dev_attr_dimm_location.attr,
	&dev_attr_size.attr,
	&dev_attr_dimm_mem_type.attr,
	&dev_attr_dimm_dev_type.attr,
	&dev_attr_dimm_edac_mode.attr,
	NULL,
};

static struct attribute_group dimm_attr_grp = {
	.attrs	= dimm_attrs,
};

static const struct attribute_group *dimm_attr_groups[] = {
	&dimm_attr_grp,
	NULL
};

static void dimm_attr_release(struct device *dev)
{
	struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);

	edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev));
	kfree(dimm);
}

static struct device_type dimm_attr_type = {
	.groups		= dimm_attr_groups,
	.release	= dimm_attr_release,
};

/* Create a DIMM object under the specified memory controller device */
static int edac_create_dimm_object(struct mem_ctl_info *mci,
				   struct dimm_info *dimm,
				   int index)
{
	int err;
	dimm->mci = mci;

	dimm->dev.type = &dimm_attr_type;
	dimm->dev.bus = &mci->bus;
	device_initialize(&dimm->dev);

	dimm->dev.parent = &mci->dev;
	if (mci->mem_is_per_rank)
		dev_set_name(&dimm->dev, "rank%d", index);
	else
		dev_set_name(&dimm->dev, "dimm%d", index);
	dev_set_drvdata(&dimm->dev, dimm);
	pm_runtime_forbid(&mci->dev);

	err =  device_add(&dimm->dev);

	edac_dbg(0, "creating rank/dimm device %s\n", dev_name(&dimm->dev));

	return err;
}

/*
 * Memory controller device
 */

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

static ssize_t mci_reset_counters_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int cnt, row, chan, i;
	mci->ue_mc = 0;
	mci->ce_mc = 0;
	mci->ue_noinfo_count = 0;
	mci->ce_noinfo_count = 0;

	for (row = 0; row < mci->nr_csrows; row++) {
		struct csrow_info *ri = mci->csrows[row];

		ri->ue_count = 0;
		ri->ce_count = 0;

		for (chan = 0; chan < ri->nr_channels; chan++)
			ri->channels[chan]->ce_count = 0;
	}

	cnt = 1;
	for (i = 0; i < mci->n_layers; i++) {
		cnt *= mci->layers[i].size;
		memset(mci->ce_per_layer[i], 0, cnt * sizeof(u32));
		memset(mci->ue_per_layer[i], 0, cnt * sizeof(u32));
	}

	mci->start_time = jiffies;
	return count;
}
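
/*
 * Example (the path assumes the sysfs layout created by
 * edac_mc_sysfs_init() and edac_create_sysfs_mci_device() below): the
 * counters of the first controller can be cleared from a root shell with
 *
 *	echo 1 > /sys/devices/system/edac/mc/mc0/reset_counters
 *
 * Any write triggers the store handler above; the value itself is ignored.
 */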

/* Memory scrubbing interface:
 *
 * An MC driver can limit the scrubbing bandwidth based on the CPU type.
 * Therefore, ->set_sdram_scrub_rate should be made to return the actual
 * bandwidth that is accepted, or 0 when scrubbing is to be disabled.
 *
 * A negative value still means that an error has occurred while setting
 * the scrub rate.
 */
static ssize_t mci_sdram_scrub_rate_store(struct device *dev,
					  struct device_attribute *mattr,
					  const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	unsigned long bandwidth = 0;
	int new_bw = 0;

	if (strict_strtoul(data, 10, &bandwidth) < 0)
		return -EINVAL;

	new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
	if (new_bw < 0) {
		edac_printk(KERN_WARNING, EDAC_MC,
			    "Error setting scrub rate to: %lu\n", bandwidth);
		return -EINVAL;
	}

	return count;
}

/*
 * ->get_sdram_scrub_rate() return value semantics same as above.
 */
static ssize_t mci_sdram_scrub_rate_show(struct device *dev,
					 struct device_attribute *mattr,
					 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int bandwidth = 0;

	bandwidth = mci->get_sdram_scrub_rate(mci);
	if (bandwidth < 0) {
		edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
		return bandwidth;
	}

	return sprintf(data, "%d\n", bandwidth);
}
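
/*
 * Driver-side sketch (illustrative only, not part of this file): a
 * hypothetical MC driver honoring the ->set_sdram_scrub_rate() and
 * ->get_sdram_scrub_rate() contracts described above.  The my_* names are
 * made up for the example.
 *
 *	static int my_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 bw)
 *	{
 *		struct my_pvt *pvt = mci->pvt_info;
 *
 *		if (!bw)
 *			return my_hw_disable_scrub(pvt) ? -EIO : 0;
 *
 *		bw = my_hw_set_scrub_bw(pvt, bw);	// clamps to hw limits
 *		return bw ? bw : -EINVAL;
 *	}
 *
 *	static int my_get_sdram_scrub_rate(struct mem_ctl_info *mci)
 *	{
 *		return my_hw_get_scrub_bw(mci->pvt_info);
 *	}
 *
 * With these hooks assigned before the driver calls edac_mc_add_mc(), the
 * sdram_scrub_rate sysfs file is created below with matching read/write
 * permissions.
 */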

/* default attribute files for the MCI object */
static ssize_t mci_ue_count_show(struct device *dev,
				 struct device_attribute *mattr,
				 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%d\n", mci->ue_mc);
}

static ssize_t mci_ce_count_show(struct device *dev,
				 struct device_attribute *mattr,
				 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%d\n", mci->ce_mc);
}

static ssize_t mci_ce_noinfo_show(struct device *dev,
				  struct device_attribute *mattr,
				  char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%d\n", mci->ce_noinfo_count);
}

static ssize_t mci_ue_noinfo_show(struct device *dev,
				  struct device_attribute *mattr,
				  char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%d\n", mci->ue_noinfo_count);
}

static ssize_t mci_seconds_show(struct device *dev,
				struct device_attribute *mattr,
				char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
}

static ssize_t mci_ctl_name_show(struct device *dev,
				 struct device_attribute *mattr,
				 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%s\n", mci->ctl_name);
}

static ssize_t mci_size_mb_show(struct device *dev,
				struct device_attribute *mattr,
				char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int total_pages = 0, csrow_idx, j;

	for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
		struct csrow_info *csrow = mci->csrows[csrow_idx];

		if (csrow->mci->csbased) {
			total_pages += csrow->nr_pages;
		} else {
			for (j = 0; j < csrow->nr_channels; j++) {
				struct dimm_info *dimm = csrow->channels[j]->dimm;

				total_pages += dimm->nr_pages;
			}
		}
	}

	return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
}

static ssize_t mci_max_location_show(struct device *dev,
				     struct device_attribute *mattr,
				     char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int i;
	char *p = data;

	for (i = 0; i < mci->n_layers; i++) {
		p += sprintf(p, "%s %d ",
			     edac_layer_name[mci->layers[i].type],
			     mci->layers[i].size - 1);
	}

	return p - data;
}
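
/*
 * For example, assuming a controller registered with a "channel" layer of
 * size 4 and a "slot" layer of size 2, max_location would read
 * "channel 3 slot 1 ", i.e. the highest valid index in each layer.
 */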

#ifdef CONFIG_EDAC_DEBUG
static ssize_t edac_fake_inject_write(struct file *file,
				      const char __user *data,
				      size_t count, loff_t *ppos)
{
	struct device *dev = file->private_data;
	struct mem_ctl_info *mci = to_mci(dev);
	static enum hw_event_mc_err_type type;
	u16 errcount = mci->fake_inject_count;

	if (!errcount)
		errcount = 1;

	type = mci->fake_inject_ue ? HW_EVENT_ERR_UNCORRECTED
				   : HW_EVENT_ERR_CORRECTED;

	printk(KERN_DEBUG
	       "Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n",
		errcount,
		(type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE",
		errcount > 1 ? "s" : "",
		mci->fake_inject_layer[0],
		mci->fake_inject_layer[1],
		mci->fake_inject_layer[2]
	       );
	edac_mc_handle_error(type, mci, errcount, 0, 0, 0,
			     mci->fake_inject_layer[0],
			     mci->fake_inject_layer[1],
			     mci->fake_inject_layer[2],
			     "FAKE ERROR", "for EDAC testing only");

	return count;
}

static const struct file_operations debug_fake_inject_fops = {
	.open = simple_open,
	.write = edac_fake_inject_write,
	.llseek = generic_file_llseek,
};
#endif

/* default Control file */
DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);

/* default Attribute files */
DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);

/* memory scrubber attribute file */
DEVICE_ATTR(sdram_scrub_rate, 0, NULL, NULL);

static struct attribute *mci_attrs[] = {
	&dev_attr_reset_counters.attr,
	&dev_attr_mc_name.attr,
	&dev_attr_size_mb.attr,
	&dev_attr_seconds_since_reset.attr,
	&dev_attr_ue_noinfo_count.attr,
	&dev_attr_ce_noinfo_count.attr,
	&dev_attr_ue_count.attr,
	&dev_attr_ce_count.attr,
	&dev_attr_max_location.attr,
	NULL
};

static struct attribute_group mci_attr_grp = {
	.attrs	= mci_attrs,
};

static const struct attribute_group *mci_attr_groups[] = {
	&mci_attr_grp,
	NULL
};

static void mci_attr_release(struct device *dev)
{
	struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);

	edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
	kfree(mci);
}

static struct device_type mci_attr_type = {
	.groups		= mci_attr_groups,
	.release	= mci_attr_release,
};

#ifdef CONFIG_EDAC_DEBUG
static struct dentry *edac_debugfs;

int __init edac_debugfs_init(void)
{
	edac_debugfs = debugfs_create_dir("edac", NULL);
	if (IS_ERR(edac_debugfs)) {
		edac_debugfs = NULL;
		return -ENOMEM;
	}
	return 0;
}

void __exit edac_debugfs_exit(void)
{
	debugfs_remove(edac_debugfs);
}

int edac_create_debug_nodes(struct mem_ctl_info *mci)
{
	struct dentry *d, *parent;
	char name[80];
	int i;

	if (!edac_debugfs)
		return -ENODEV;

	d = debugfs_create_dir(mci->dev.kobj.name, edac_debugfs);
	if (!d)
		return -ENOMEM;
	parent = d;

	for (i = 0; i < mci->n_layers; i++) {
		sprintf(name, "fake_inject_%s",
			     edac_layer_name[mci->layers[i].type]);
		d = debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent,
				      &mci->fake_inject_layer[i]);
		if (!d)
			goto nomem;
	}

	d = debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent,
				&mci->fake_inject_ue);
	if (!d)
		goto nomem;

	d = debugfs_create_u16("fake_inject_count", S_IRUGO | S_IWUSR, parent,
				&mci->fake_inject_count);
	if (!d)
		goto nomem;

	d = debugfs_create_file("fake_inject", S_IWUSR, parent,
				&mci->dev,
				&debug_fake_inject_fops);
	if (!d)
		goto nomem;

	mci->debugfs = parent;
	return 0;
nomem:
	debugfs_remove(mci->debugfs);
	return -ENOMEM;
}
#endif
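
/*
 * Debugfs usage sketch (illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug and the controller is mc0): edac_create_debug_nodes()
 * creates /sys/kernel/debug/edac/mc0/ with one fake_inject_<layer> file per
 * layer plus fake_inject_ue, fake_inject_count and fake_inject.  A fake
 * error can then be injected from a root shell:
 *
 *	echo 2 > /sys/kernel/debug/edac/mc0/fake_inject_channel
 *	echo 1 > /sys/kernel/debug/edac/mc0/fake_inject_slot
 *	echo 0 > /sys/kernel/debug/edac/mc0/fake_inject_ue
 *	echo 1 > /sys/kernel/debug/edac/mc0/fake_inject
 *
 * which lands in edac_fake_inject_write() and, from there, in
 * edac_mc_handle_error().  The per-layer file names depend on the layers
 * the driver registered, so "channel" and "slot" here are only examples.
 */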

/*
 * Create a new Memory Controller kobject instance,
 *	mc<id> under the 'mc' directory
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
{
	int i, err;

	/*
	 * The memory controller needs its own bus, in order to avoid
	 * namespace conflicts at /sys/bus/edac.
	 */
	mci->bus.name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
	if (!mci->bus.name)
		return -ENOMEM;
	edac_dbg(0, "creating bus %s\n", mci->bus.name);
	err = bus_register(&mci->bus);
	if (err < 0)
		return err;

	/* set up the mci device */
	mci->dev.type = &mci_attr_type;
	device_initialize(&mci->dev);

	mci->dev.parent = mci_pdev;
	mci->dev.bus = &mci->bus;
	dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
	dev_set_drvdata(&mci->dev, mci);
	pm_runtime_forbid(&mci->dev);

	edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
	err = device_add(&mci->dev);
	if (err < 0) {
		edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
		bus_unregister(&mci->bus);
		kfree(mci->bus.name);
		return err;
	}

	if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
		if (mci->get_sdram_scrub_rate) {
			dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
			dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
		}
		if (mci->set_sdram_scrub_rate) {
			dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
			dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
		}
		err = device_create_file(&mci->dev,
					 &dev_attr_sdram_scrub_rate);
		if (err) {
			edac_dbg(1, "failure: create sdram_scrub_rate\n");
			goto fail2;
		}
	}
	/*
	 * Create the dimm/rank devices
	 */
	for (i = 0; i < mci->tot_dimms; i++) {
		struct dimm_info *dimm = mci->dimms[i];
		/* Only expose populated DIMMs */
		if (dimm->nr_pages == 0)
			continue;
#ifdef CONFIG_EDAC_DEBUG
		edac_dbg(1, "creating dimm%d, located at ", i);
		if (edac_debug_level >= 1) {
			int lay;
			for (lay = 0; lay < mci->n_layers; lay++)
				printk(KERN_CONT "%s %d ",
					edac_layer_name[mci->layers[lay].type],
					dimm->location[lay]);
			printk(KERN_CONT "\n");
		}
#endif
		err = edac_create_dimm_object(mci, dimm, i);
		if (err) {
			edac_dbg(1, "failure: create dimm %d obj\n", i);
			goto fail;
		}
	}

#ifdef CONFIG_EDAC_LEGACY_SYSFS
	err = edac_create_csrow_objects(mci);
	if (err < 0)
		goto fail;
#endif

#ifdef CONFIG_EDAC_DEBUG
	edac_create_debug_nodes(mci);
#endif
	return 0;

fail:
	for (i--; i >= 0; i--) {
		struct dimm_info *dimm = mci->dimms[i];
		if (dimm->nr_pages == 0)
			continue;
		device_unregister(&dimm->dev);
	}
fail2:
	device_unregister(&mci->dev);
	bus_unregister(&mci->bus);
	kfree(mci->bus.name);
	return err;
}

/*
 * remove a Memory Controller instance
 */
void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
{
	int i;

	edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
	debugfs_remove(mci->debugfs);
#endif
#ifdef CONFIG_EDAC_LEGACY_SYSFS
	edac_delete_csrow_objects(mci);
#endif

	for (i = 0; i < mci->tot_dimms; i++) {
		struct dimm_info *dimm = mci->dimms[i];
		if (dimm->nr_pages == 0)
			continue;
		edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev));
		device_unregister(&dimm->dev);
	}
}

void edac_unregister_sysfs(struct mem_ctl_info *mci)
{
	edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
	device_unregister(&mci->dev);
	bus_unregister(&mci->bus);
	kfree(mci->bus.name);
}

static void mc_attr_release(struct device *dev)
{
	/*
	 * There's no container structure here, as this is just the mci
	 * parent device, used to create the /sys/devices/system/edac/mc node.
	 * So, there are no attributes on it.
	 */
	edac_dbg(1, "Releasing device %s\n", dev_name(dev));
	kfree(dev);
}

static struct device_type mc_attr_type = {
	.release	= mc_attr_release,
};
/*
 * Init/exit code for the module. Basically, creates/removes the
 * /sys/devices/system/edac/mc device node.
 */
int __init edac_mc_sysfs_init(void)
{
	struct bus_type *edac_subsys;
	int err;

	/* get the /sys/devices/system/edac subsys reference */
	edac_subsys = edac_get_sysfs_subsys();
	if (edac_subsys == NULL) {
		edac_dbg(1, "no edac_subsys\n");
		err = -EINVAL;
		goto out;
	}

	mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL);
	if (!mci_pdev) {
		err = -ENOMEM;
		goto out_put_sysfs;
	}

	mci_pdev->bus = edac_subsys;
	mci_pdev->type = &mc_attr_type;
	device_initialize(mci_pdev);
	dev_set_name(mci_pdev, "mc");

	err = device_add(mci_pdev);
	if (err < 0)
		goto out_dev_free;

	edac_dbg(0, "device %s created\n", dev_name(mci_pdev));

	return 0;

 out_dev_free:
	kfree(mci_pdev);
 out_put_sysfs:
	edac_put_sysfs_subsys();
 out:
	return err;
}

void __exit edac_mc_sysfs_exit(void)
{
	device_unregister(mci_pdev);
	edac_put_sysfs_subsys();
}