/*
 * edac_device.c
 * (C) 2007 www.douglaskthompson.com
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Doug Thompson <norsk5@xmission.com>
 *
 * edac_device API implementation
 * 19 Jan 2007
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/sysdev.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <asm/uaccess.h>
#include <asm/page.h>

#include "edac_core.h"
#include "edac_module.h"

/* The 'edac_device_list' and any manipulation of it are protected
 * by the 'device_ctls_mutex' lock
 */
static DEFINE_MUTEX(device_ctls_mutex);
static LIST_HEAD(edac_device_list);

#ifdef CONFIG_EDAC_DEBUG
static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
{
	debugf3("\tedac_dev = %p dev_idx=%d \n", edac_dev, edac_dev->dev_idx);
	debugf4("\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
	debugf3("\tdev = %p\n", edac_dev->dev);
	debugf3("\tmod_name:ctl_name = %s:%s\n",
		edac_dev->mod_name, edac_dev->ctl_name);
	debugf3("\tpvt_info = %p\n\n", edac_dev->pvt_info);
}
#endif				/* CONFIG_EDAC_DEBUG */

/*
 * edac_device_alloc_ctl_info()
 *	Allocate a new edac device control info structure
 *
 *	The control structure is allocated as one complete chunk
 *	from the OS. It is in turn sub-allocated to the
 *	various objects that compose the structure.
 *
 *	The structure has a 'nr_instances' array within itself.
 *	Each instance represents a major component.
 *		Example: L1 cache and L2 cache are 2 instance components
 *
 *	Within each instance is an array of 'nr_blocks' blocks.
 */
struct edac_device_ctl_info *edac_device_alloc_ctl_info(
	unsigned sz_private,
	char *edac_device_name, unsigned nr_instances,
	char *edac_block_name, unsigned nr_blocks,
	unsigned offset_value,		/* zero, 1, or other based offset */
	struct edac_dev_sysfs_block_attribute *attrib_spec, unsigned nr_attrib,
	int device_index)
{
	struct edac_device_ctl_info *dev_ctl;
	struct edac_device_instance *dev_inst, *inst;
	struct edac_device_block *dev_blk, *blk_p, *blk;
	struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib;
	unsigned total_size;
	unsigned count;
	unsigned instance, block, attr;
	void *pvt;
	int err;

	debugf4("%s() instances=%d blocks=%d\n",
		__func__, nr_instances, nr_blocks);

	/* Calculate the size of memory we need to allocate AND
	 * determine the offsets of the various item arrays
	 * (instance,block,attrib) from the start of an allocated structure.
	 * We want the alignment of each item (instance,block,attrib)
	 * to be at least as stringent as what the compiler would
	 * provide if we could simply hardcode everything into a single struct.
	 */
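	/* Illustrative layout sketch (not from the original source): with,
	 * say, nr_instances = 2, nr_blocks = 3 and nr_attrib = 0, the one
	 * allocation is carved up roughly as
	 *
	 *	[ edac_device_ctl_info                 ]  <- dev_ctl
	 *	[ edac_device_instance x 2             ]  <- dev_inst
	 *	[ edac_device_block    x (2 * 3)       ]  <- dev_blk
	 *	[ block attributes, only if nr_attrib  ]  <- dev_attrib
	 *	[ sz_private bytes of driver data      ]  <- pvt
	 *
	 * with each boundary rounded up by edac_align_ptr().
	 */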
	dev_ctl = (struct edac_device_ctl_info *)NULL;

	/* Calc the 'end' offset past end of ONE ctl_info structure
	 * which will become the start of the 'instance' array
	 */
	dev_inst = edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst));

	/* Calc the 'end' offset past the instance array within the ctl_info
	 * which will become the start of the block array
	 */
	dev_blk = edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk));

	/* Calc the 'end' offset past the dev_blk array
	 * which will become the start of the attrib array, if any.
	 */
	count = nr_instances * nr_blocks;
	dev_attrib = edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib));

	/* Check for case of when an attribute array is specified */
	if (nr_attrib > 0) {
		/* calc how many nr_attrib we need */
		count *= nr_attrib;

		/* Calc the 'end' offset past the attributes array */
		pvt = edac_align_ptr(&dev_attrib[count], sz_private);
	} else {
		/* no attribute array specified */
		pvt = edac_align_ptr(dev_attrib, sz_private);
	}

	/* 'pvt' now points to where the private data area is.
	 * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
	 * is baselined at ZERO
	 */
	total_size = ((unsigned long)pvt) + sz_private;

	/* Allocate the amount of memory for the set of control structures */
	dev_ctl = kzalloc(total_size, GFP_KERNEL);
	if (dev_ctl == NULL)
		return NULL;

	/* Adjust pointers so they point within the actual memory we
	 * just allocated rather than an imaginary chunk of memory
	 * located at address 0.
	 * 'dev_ctl' points to REAL memory, while the others are
	 * ZERO based and thus need to be adjusted to point within
	 * the allocated memory.
	 */
	dev_inst = (struct edac_device_instance *)
		(((char *)dev_ctl) + ((unsigned long)dev_inst));
	dev_blk = (struct edac_device_block *)
		(((char *)dev_ctl) + ((unsigned long)dev_blk));
	dev_attrib = (struct edac_dev_sysfs_block_attribute *)
		(((char *)dev_ctl) + ((unsigned long)dev_attrib));
	pvt = sz_private ? (((char *)dev_ctl) + ((unsigned long)pvt)) : NULL;

	/* Begin storing the information into the control info structure */
	dev_ctl->dev_idx = device_index;
	dev_ctl->nr_instances = nr_instances;
	dev_ctl->instances = dev_inst;
	dev_ctl->pvt_info = pvt;

	/* Default logging of CEs and UEs */
	dev_ctl->log_ce = 1;
	dev_ctl->log_ue = 1;

	/* Name of this edac device */
	snprintf(dev_ctl->name, sizeof(dev_ctl->name), "%s", edac_device_name);

	debugf4("%s() edac_dev=%p next after end=%p\n",
		__func__, dev_ctl, pvt + sz_private);

	/* Initialize every Instance */
	for (instance = 0; instance < nr_instances; instance++) {
		inst = &dev_inst[instance];
		inst->ctl = dev_ctl;
		inst->nr_blocks = nr_blocks;
		blk_p = &dev_blk[instance * nr_blocks];
		inst->blocks = blk_p;

		/* name of this instance */
		snprintf(inst->name, sizeof(inst->name),
			 "%s%u", edac_device_name, instance);

		/* Initialize every block in each instance */
		for (block = 0; block < nr_blocks; block++) {
			blk = &blk_p[block];
			blk->instance = inst;
			snprintf(blk->name, sizeof(blk->name),
				 "%s%d", edac_block_name, block+offset_value);

			debugf4("%s() instance=%d inst_p=%p block=#%d "
				"block_p=%p name='%s'\n",
				__func__, instance, inst, block,
				blk, blk->name);

			/* if there are NO attributes OR no attribute pointer
			 * then continue on to next block iteration
			 */
			if ((nr_attrib == 0) || (attrib_spec == NULL))
				continue;

			/* setup the attribute array for this block */
			blk->nr_attribs = nr_attrib;
			attrib_p = &dev_attrib[block*nr_instances*nr_attrib];
			blk->block_attributes = attrib_p;

			debugf4("%s() THIS BLOCK_ATTRIB=%p\n",
				__func__, blk->block_attributes);

			/* Initialize every user specified attribute in this
			 * block with the data the caller passed in
			 * Each block gets its own copy of pointers,
			 * and its unique 'value'
			 */
			for (attr = 0; attr < nr_attrib; attr++) {
				attrib = &attrib_p[attr];

				/* populate the unique per attrib
				 * with the code pointers and info
				 */
				attrib->attr = attrib_spec[attr].attr;
				attrib->show = attrib_spec[attr].show;
				attrib->store = attrib_spec[attr].store;

				attrib->block = blk;	/* up link */

				debugf4("%s() alloc-attrib=%p attrib_name='%s' "
					"attrib-spec=%p spec-name=%s\n",
					__func__, attrib, attrib->attr.name,
					&attrib_spec[attr],
					attrib_spec[attr].attr.name
					);
			}
		}
	}

	/* Mark this instance as merely ALLOCATED */
	dev_ctl->op_state = OP_ALLOC;

	/*
	 * Initialize the 'root' kobj for the edac_device controller
	 */
	err = edac_device_register_sysfs_main_kobj(dev_ctl);
	if (err) {
		kfree(dev_ctl);
		return NULL;
	}

	/* At this point the root kobj is valid; to 'free' the object,
	 * edac_device_unregister_sysfs_main_kobj() must be called,
	 * which performs the kobj unregistration; the actual free
	 * then occurs during the kobject release callback
	 */

	return dev_ctl;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);
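
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * hypothetical driver exposing two cache instances, each with a single
 * block and no extra sysfs attributes, might allocate its control
 * structure roughly like this; "struct my_pvt" and the trailing device
 * index of 0 are assumptions.
 *
 *	struct edac_device_ctl_info *edac_dev;
 *
 *	edac_dev = edac_device_alloc_ctl_info(sizeof(struct my_pvt),
 *					      "cache", 2, "L", 1, 1,
 *					      NULL, 0, 0);
 *	if (!edac_dev)
 *		return -ENOMEM;
 *
 * The matching release is edac_device_free_ctl_info(edac_dev), normally
 * from the driver's remove or error path.
 */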

/*
 * edac_device_free_ctl_info()
 *	frees the memory allocated by the edac_device_alloc_ctl_info()
 *	function
 */
void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
{
	edac_device_unregister_sysfs_main_kobj(ctl_info);
}
EXPORT_SYMBOL_GPL(edac_device_free_ctl_info);

/*
 * find_edac_device_by_dev
 *	scans the edac_device list for a specific 'struct device *'
 *
 *	lock to be held prior to call:	device_ctls_mutex
 *
 *	Return:
 *		pointer to control structure managing 'dev'
 *		NULL if not found on list
 */
static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
{
	struct edac_device_ctl_info *edac_dev;
	struct list_head *item;

	debugf0("%s()\n", __func__);

	list_for_each(item, &edac_device_list) {
		edac_dev = list_entry(item, struct edac_device_ctl_info, link);

		if (edac_dev->dev == dev)
			return edac_dev;
	}

	return NULL;
}

/*
 * add_edac_dev_to_global_list
 *	Before calling this function, caller must
 *	assign a unique value to edac_dev->dev_idx.
 *
 *	lock to be held prior to call:	device_ctls_mutex
 *
 *	Return:
 *		0 on success
 *		1 on failure.
 */
static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
{
	struct list_head *item, *insert_before;
	struct edac_device_ctl_info *rover;

	insert_before = &edac_device_list;

	/* Determine if already on the list */
	rover = find_edac_device_by_dev(edac_dev->dev);
	if (unlikely(rover != NULL))
		goto fail0;

	/* Insert in ascending order by 'dev_idx', so find position */
	list_for_each(item, &edac_device_list) {
		rover = list_entry(item, struct edac_device_ctl_info, link);

		if (rover->dev_idx >= edac_dev->dev_idx) {
			if (unlikely(rover->dev_idx == edac_dev->dev_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	list_add_tail_rcu(&edac_dev->link, insert_before);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
			"%s (%s) %s %s already assigned %d\n",
			rover->dev->bus_id, dev_name(rover),
			rover->mod_name, rover->ctl_name, rover->dev_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
			"bug in low-level driver: attempt to assign\n"
			"    duplicate dev_idx %d in %s()\n", rover->dev_idx,
			__func__);
	return 1;
}

/*
 * complete_edac_device_list_del
 *
 *	callback function when reference count is zero
 */
static void complete_edac_device_list_del(struct rcu_head *head)
{
	struct edac_device_ctl_info *edac_dev;

	edac_dev = container_of(head, struct edac_device_ctl_info, rcu);
	INIT_LIST_HEAD(&edac_dev->link);
	complete(&edac_dev->removal_complete);
}

/*
 * del_edac_device_from_global_list
 *
 *	remove the item from the RCU-protected list, set up the
 *	RCU callback, then wait for that callback to complete
 */
static void del_edac_device_from_global_list(struct edac_device_ctl_info
						*edac_device)
{
	list_del_rcu(&edac_device->link);

	init_completion(&edac_device->removal_complete);
	call_rcu(&edac_device->rcu, complete_edac_device_list_del);
	wait_for_completion(&edac_device->removal_complete);
}

/**
 * edac_device_find
 *	Search for an edac_device_ctl_info structure whose index is 'idx'.
 *
 * If found, return a pointer to the structure.
 * Else return NULL.
 *
 * Caller must hold device_ctls_mutex.
 */
struct edac_device_ctl_info *edac_device_find(int idx)
{
	struct list_head *item;
	struct edac_device_ctl_info *edac_dev;

	/* Iterate over list, looking for exact match of ID */
	list_for_each(item, &edac_device_list) {
		edac_dev = list_entry(item, struct edac_device_ctl_info, link);

		if (edac_dev->dev_idx >= idx) {
			if (edac_dev->dev_idx == idx)
				return edac_dev;

			/* not on list, so terminate early */
			break;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(edac_device_find);

/*
 * edac_device_workq_function
 *	performs the operation scheduled by a workq request
 *
 *	this workq is embedded within an edac_device_ctl_info
 *	structure, that needs to be polled for possible error events.
 *
 *	This operation is to acquire the list mutex lock
 *	(thus preventing insertion or deletion)
 *	and then call the device's poll function IFF this device is
 *	running polled and there is a poll function defined.
 */
static void edac_device_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = (struct delayed_work *)work_req;
	struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);

	mutex_lock(&device_ctls_mutex);

	/* Only poll controllers that are running polled and have a check */
	if ((edac_dev->op_state == OP_RUNNING_POLL) &&
	    (edac_dev->edac_check != NULL)) {
		edac_dev->edac_check(edac_dev);
	}

	mutex_unlock(&device_ctls_mutex);

	/* Reschedule the workq for the next time period to start again.
	 * If the poll period is 1 sec, then adjust to the next whole
	 * second, to keep timers from firing all over the period
	 * between integral seconds
	 */
	if (edac_dev->poll_msec == 1000)
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				round_jiffies_relative(edac_dev->delay));
	else
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				edac_dev->delay);
}

/*
 * edac_device_workq_setup
 *	initialize a workq item for this edac_device instance
 *	passing in the new delay period in msec
 */
void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
				unsigned msec)
{
	debugf0("%s()\n", __func__);

	/* take the arg 'msec' and store it in the control structure,
	 * to be used in the time period calculation,
	 * then calculate the number of jiffies that it represents
	 */
	edac_dev->poll_msec = msec;
	edac_dev->delay = msecs_to_jiffies(msec);

	INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);

	/* optimize here for the 1 second case, which will be the normal
	 * value, to fire ON the 1 second time event. This helps reduce
	 * all sorts of timers firing on a sub-second basis, while they
	 * are happy to fire together exactly on the 1 second boundary
	 */
	if (edac_dev->poll_msec == 1000)
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				round_jiffies_relative(edac_dev->delay));
	else
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				edac_dev->delay);
}
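
/*
 * Worked example for the rounding above (illustrative; assumes HZ=250): a
 * caller passing msec=1000 gets poll_msec = 1000 and delay =
 * msecs_to_jiffies(1000) = 250 jiffies.  Because poll_msec is exactly
 * 1000, the work is queued with round_jiffies_relative(250), which rounds
 * the expiry onto a whole-second boundary so that many polled EDAC
 * devices wake together instead of at scattered sub-second offsets.  Any
 * other period is queued with the raw delay.
 */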

/*
 * edac_device_workq_teardown
 *	stop the workq processing on this edac_dev
 */
void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
{
	int status;

	status = cancel_delayed_work(&edac_dev->work);
	if (status == 0) {
		/* workq instance might be running, wait for it */
		flush_workqueue(edac_workqueue);
	}
}

/*
 * edac_device_reset_delay_period
 *
 *	need to stop any outstanding workq request queued up at this
 *	time, because we will be resetting the sleep time.
 *	Then restart the workq with the new delay
 */
void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
					unsigned long value)
{
	/* cancel the current workq request, without the mutex lock */
	edac_device_workq_teardown(edac_dev);

	/* acquire the mutex before doing the workq setup */
	mutex_lock(&device_ctls_mutex);

	/* restart the workq request, with new delay value */
	edac_device_workq_setup(edac_dev, value);

	mutex_unlock(&device_ctls_mutex);
}

/**
 * edac_device_add_device: Insert the 'edac_dev' structure into the
 * edac_device global list and create sysfs entries associated with
 * edac_device structure.
 * @edac_dev: pointer to the edac_device structure to be added to the
 * global 'edac_device' list.
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */
int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
{
	debugf0("%s()\n", __func__);

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_device_dump_device(edac_dev);
#endif
	mutex_lock(&device_ctls_mutex);

	if (add_edac_dev_to_global_list(edac_dev))
		goto fail0;

	/* set load time so that error rate can be tracked */
	edac_dev->start_time = jiffies;

	/* create this instance's sysfs entries */
	if (edac_device_create_sysfs(edac_dev)) {
		edac_device_printk(edac_dev, KERN_WARNING,
					"failed to create sysfs device\n");
		goto fail1;
	}

	/* If there IS a check routine, then we are running POLLED */
	if (edac_dev->edac_check != NULL) {
		/* This instance is NOW RUNNING */
		edac_dev->op_state = OP_RUNNING_POLL;

		/*
		 * enable workq processing on this instance,
		 * default = 1000 msec
		 */
		edac_device_workq_setup(edac_dev, 1000);
	} else {
		edac_dev->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_device_printk(edac_dev, KERN_INFO,
				"Giving out device to module '%s' controller "
				"'%s': DEV '%s' (%s)\n",
				edac_dev->mod_name,
				edac_dev->ctl_name,
				dev_name(edac_dev),
				edac_op_state_to_string(edac_dev->op_state));

	mutex_unlock(&device_ctls_mutex);
	return 0;

fail1:
	/* Some error, so remove the entry from the list */
	del_edac_device_from_global_list(edac_dev);

fail0:
	mutex_unlock(&device_ctls_mutex);
	return 1;
}
EXPORT_SYMBOL_GPL(edac_device_add_device);
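
/*
 * Probe-time sketch (illustrative, not from the original file): a driver
 * normally allocates the control structure, fills in its identification
 * fields and an optional polled check routine, and only then registers
 * it.  The names "my_edac", my_edac_check() and the use of a platform
 * device 'pdev' are assumptions.
 *
 *	edac_dev->dev = &pdev->dev;
 *	edac_dev->mod_name = "my_edac";
 *	edac_dev->ctl_name = "my_ctl";
 *	edac_dev->edac_check = my_edac_check;	(NULL means interrupt driven)
 *
 *	if (edac_device_add_device(edac_dev)) {
 *		edac_device_free_ctl_info(edac_dev);
 *		return -ENODEV;
 *	}
 */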

/**
 * edac_device_del_device:
 *	Remove sysfs entries for specified edac_device structure and
 *	then remove edac_device structure from global list
 *
 * @dev:
 *	Pointer to 'struct device' representing edac_device
 *	structure to remove.
 *
 * Return:
 *	Pointer to removed edac_device structure,
 *	OR NULL if device not found.
 */
struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
{
	struct edac_device_ctl_info *edac_dev;

	debugf0("%s()\n", __func__);

	mutex_lock(&device_ctls_mutex);

	/* Find the structure on the list, if not there, then leave */
	edac_dev = find_edac_device_by_dev(dev);
	if (edac_dev == NULL) {
		mutex_unlock(&device_ctls_mutex);
		return NULL;
	}

	/* mark this instance as OFFLINE */
	edac_dev->op_state = OP_OFFLINE;

	/* clear workq processing on this instance */
	edac_device_workq_teardown(edac_dev);

	/* deregister from global list */
	del_edac_device_from_global_list(edac_dev);

	mutex_unlock(&device_ctls_mutex);

	/* Tear down the sysfs entries for this instance */
	edac_device_remove_sysfs(edac_dev);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n",
		edac_dev->dev_idx,
		edac_dev->mod_name, edac_dev->ctl_name, dev_name(edac_dev));

	return edac_dev;
}
EXPORT_SYMBOL_GPL(edac_device_del_device);
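
/*
 * Removal sketch (illustrative): the mirror of the probe sequence, as
 * commonly seen in edac drivers; 'pdev' is an assumed platform device.
 *
 *	struct edac_device_ctl_info *edac_dev;
 *
 *	edac_dev = edac_device_del_device(&pdev->dev);
 *	if (edac_dev)
 *		edac_device_free_ctl_info(edac_dev);
 */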

static inline int edac_device_get_log_ce(struct edac_device_ctl_info *edac_dev)
{
	return edac_dev->log_ce;
}

static inline int edac_device_get_log_ue(struct edac_device_ctl_info *edac_dev)
{
	return edac_dev->log_ue;
}

static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
					*edac_dev)
{
	return edac_dev->panic_on_ue;
}

/*
 * edac_device_handle_ce
 *	perform a common output and handling of an 'edac_dev' CE event
 */
void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
			int inst_nr, int block_nr, const char *msg)
{
	struct edac_device_instance *instance;
	struct edac_device_block *block = NULL;

	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: 'instance' out of range "
				"(%d >= %d)\n", inst_nr,
				edac_dev->nr_instances);
		return;
	}

	instance = edac_dev->instances + inst_nr;

	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: instance %d 'block' "
				"out of range (%d >= %d)\n",
				inst_nr, block_nr,
				instance->nr_blocks);
		return;
	}

	if (instance->nr_blocks > 0) {
		block = instance->blocks + block_nr;
		block->counters.ce_count++;
	}

	/* Propagate the count up the 'totals' tree */
	instance->counters.ce_count++;
	edac_dev->counters.ce_count++;

	if (edac_device_get_log_ce(edac_dev))
		edac_device_printk(edac_dev, KERN_WARNING,
				"CE: %s instance: %s block: %s '%s'\n",
				edac_dev->ctl_name, instance->name,
				block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ce);

/*
 * edac_device_handle_ue
 *	perform a common output and handling of an 'edac_dev' UE event
 */
void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
			int inst_nr, int block_nr, const char *msg)
{
	struct edac_device_instance *instance;
	struct edac_device_block *block = NULL;

	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: 'instance' out of range "
				"(%d >= %d)\n", inst_nr,
				edac_dev->nr_instances);
		return;
	}

	instance = edac_dev->instances + inst_nr;

	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: instance %d 'block' "
				"out of range (%d >= %d)\n",
				inst_nr, block_nr,
				instance->nr_blocks);
		return;
	}

	if (instance->nr_blocks > 0) {
		block = instance->blocks + block_nr;
		block->counters.ue_count++;
	}

	/* Propagate the count up the 'totals' tree */
	instance->counters.ue_count++;
	edac_dev->counters.ue_count++;

	if (edac_device_get_log_ue(edac_dev))
		edac_device_printk(edac_dev, KERN_EMERG,
				"UE: %s instance: %s block: %s '%s'\n",
				edac_dev->ctl_name, instance->name,
				block ? block->name : "N/A", msg);

	if (edac_device_get_panic_on_ue(edac_dev))
		panic("EDAC %s: UE instance: %s block %s '%s'\n",
			edac_dev->ctl_name, instance->name,
			block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ue);
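
/*
 * Reporting sketch (illustrative): a polled driver's edac_check routine,
 * or its interrupt handler, feeds decoded errors into the counters kept
 * by the two handlers above; the my_read_ce_status() and
 * my_read_ue_status() helpers are assumptions.
 *
 *	static void my_edac_check(struct edac_device_ctl_info *edac_dev)
 *	{
 *		if (my_read_ce_status())
 *			edac_device_handle_ce(edac_dev, 0, 0,
 *					"single-bit error corrected");
 *		if (my_read_ue_status())
 *			edac_device_handle_ue(edac_dev, 0, 0,
 *					"uncorrectable error");
 *	}
 *
 * The two integer arguments are the instance and block numbers that were
 * sized in edac_device_alloc_ctl_info().
 */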