/*
 * PowerNV OPAL high level interfaces
 *
 * Copyright 2011 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt)	"opal: " fmt

#include <linux/printk.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kobject.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/firmware.h>
#include <asm/mce.h>

#include "powernv.h"

/* /sys/firmware/opal */
struct kobject *opal_kobj;

struct opal {
	u64 base;
	u64 entry;
	u64 size;
} opal;

struct mcheck_recoverable_range {
	u64 start_addr;
	u64 end_addr;
	u64 recover_addr;
};

static struct mcheck_recoverable_range *mc_recoverable_range;
static int mc_recoverable_range_len;

struct device_node *opal_node;
static DEFINE_SPINLOCK(opal_write_lock);
static unsigned int *opal_irqs;
static unsigned int opal_irq_count;
static ATOMIC_NOTIFIER_HEAD(opal_notifier_head);
static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
static DEFINE_SPINLOCK(opal_notifier_lock);
static uint64_t last_notified_mask = 0x0ul;
static atomic_t opal_notifier_hold = ATOMIC_INIT(0);
static uint32_t opal_heartbeat;

static void opal_reinit_cores(void)
{
	/* Do the actual re-init. This will clobber all FPRs, VRs, etc...
	 *
	 * It will preserve non-volatile GPRs and HSPRG0/1. It will
	 * also restore HIDs and other SPRs to their original value
	 * but it might clobber a bunch.
	 */
#ifdef __BIG_ENDIAN__
	opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);
#else
	opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_LE);
#endif
}

int __init early_init_dt_scan_opal(unsigned long node,
				   const char *uname, int depth, void *data)
{
	const void *basep, *entryp, *sizep;
	int basesz, entrysz, runtimesz;

	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
		return 0;

	basep  = of_get_flat_dt_prop(node, "opal-base-address", &basesz);
	entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz);
	sizep = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz);

	if (!basep || !entryp || !sizep)
		return 1;

	opal.base = of_read_number(basep, basesz/4);
	opal.entry = of_read_number(entryp, entrysz/4);
	opal.size = of_read_number(sizep, runtimesz/4);

	pr_debug("OPAL Base  = 0x%llx (basep=%p basesz=%d)\n",
		 opal.base, basep, basesz);
	pr_debug("OPAL Entry = 0x%llx (entryp=%p entrysz=%d)\n",
		 opal.entry, entryp, entrysz);
	pr_debug("OPAL Size  = 0x%llx (sizep=%p runtimesz=%d)\n",
		 opal.size, sizep, runtimesz);

	powerpc_firmware_features |= FW_FEATURE_OPAL;
	if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
		powerpc_firmware_features |= FW_FEATURE_OPALv2;
		powerpc_firmware_features |= FW_FEATURE_OPALv3;
		pr_info("OPAL V3 detected !\n");
	} else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
		powerpc_firmware_features |= FW_FEATURE_OPALv2;
		pr_info("OPAL V2 detected !\n");
	} else {
		pr_info("OPAL V1 detected !\n");
	}

	/* Reinit all cores with the right endian */
	opal_reinit_cores();

	/* Restore some bits */
	if (cur_cpu_spec->cpu_restore)
		cur_cpu_spec->cpu_restore();

	return 1;
}

int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
				   const char *uname, int depth, void *data)
{
	int i, psize, size;
	const __be32 *prop;

	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize);

	if (!prop)
		return 1;

	pr_debug("Found machine check recoverable ranges.\n");

	/*
	 * Calculate number of available entries.
	 *
	 * Each recoverable address range entry is (start address, len,
	 * recovery address), 2 cells each for start and recovery address,
	 * 1 cell for len, totalling 5 cells per entry.
	 */
	mc_recoverable_range_len = psize / (sizeof(*prop) * 5);

	/* Sanity check */
	if (!mc_recoverable_range_len)
		return 1;

	/* Size required to hold all the entries. */
	size = mc_recoverable_range_len *
			sizeof(struct mcheck_recoverable_range);

	/*
	 * Allocate a buffer to hold the MC recoverable ranges. We would be
	 * accessing them in real mode, hence they need to be within the
	 * RMO region.
	 */
	mc_recoverable_range = __va(memblock_alloc_base(size, __alignof__(u64),
							ppc64_rma_size));
	memset(mc_recoverable_range, 0, size);

	for (i = 0; i < mc_recoverable_range_len; i++) {
		mc_recoverable_range[i].start_addr =
					of_read_number(prop + (i * 5) + 0, 2);
		mc_recoverable_range[i].end_addr =
					mc_recoverable_range[i].start_addr +
					of_read_number(prop + (i * 5) + 2, 1);
		mc_recoverable_range[i].recover_addr =
					of_read_number(prop + (i * 5) + 3, 2);

		pr_debug("Machine check recoverable range: %llx..%llx: %llx\n",
				mc_recoverable_range[i].start_addr,
				mc_recoverable_range[i].end_addr,
				mc_recoverable_range[i].recover_addr);
	}
	return 1;
}

static int __init opal_register_exception_handlers(void)
{
#ifdef __BIG_ENDIAN__
	u64 glue;

	if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
		return -ENODEV;

	/* Hook up some exception handlers except machine check. We use the
	 * fwnmi area at 0x7000 to provide the glue space to OPAL
	 */
	glue = 0x7000;

	/*
	 * Check if we are running on newer firmware that exports the
	 * OPAL_HANDLE_HMI token. If yes, then don't ask OPAL to patch
	 * the HMI vector; we catch it directly in Linux.
	 *
	 * For older firmware (i.e. currently released POWER8 System Firmware
	 * as of today, <= SV810_087), we fall back to the old behavior and
	 * let OPAL patch the HMI vector and handle it inside OPAL firmware.
	 *
	 * For newer firmware (in development/yet to be released) we will
	 * start catching/handling HMIs directly in Linux.
	 */
	if (!opal_check_token(OPAL_HANDLE_HMI)) {
		pr_info("Old firmware detected, OPAL handles HMIs.\n");
		opal_register_exception_handler(
				OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
				0, glue);
		glue += 128;
	}

	opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
#endif

	return 0;
}
machine_early_initcall(powernv, opal_register_exception_handlers);

int opal_notifier_register(struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}

	atomic_notifier_chain_register(&opal_notifier_head, nb);
	return 0;
}
EXPORT_SYMBOL_GPL(opal_notifier_register);

int opal_notifier_unregister(struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}

	atomic_notifier_chain_unregister(&opal_notifier_head, nb);
	return 0;
}
EXPORT_SYMBOL_GPL(opal_notifier_unregister);

static void opal_do_notifier(uint64_t events)
{
	unsigned long flags;
	uint64_t changed_mask;

	if (atomic_read(&opal_notifier_hold))
		return;

	spin_lock_irqsave(&opal_notifier_lock, flags);
	changed_mask = last_notified_mask ^ events;
	last_notified_mask = events;
	spin_unlock_irqrestore(&opal_notifier_lock, flags);

	/*
	 * Pass both the event bits and the changed bits so the callback
	 * has enough information.
	 */
	atomic_notifier_call_chain(&opal_notifier_head,
				   events, (void *)changed_mask);
}

void opal_notifier_update_evt(uint64_t evt_mask,
			      uint64_t evt_val)
{
	unsigned long flags;

	spin_lock_irqsave(&opal_notifier_lock, flags);
	last_notified_mask &= ~evt_mask;
	last_notified_mask |= evt_val;
	spin_unlock_irqrestore(&opal_notifier_lock, flags);
}

void opal_notifier_enable(void)
{
	int64_t rc;
	__be64 evt = 0;

	atomic_set(&opal_notifier_hold, 0);

	/* Process pending events */
	rc = opal_poll_events(&evt);
	if (rc == OPAL_SUCCESS && evt)
		opal_do_notifier(be64_to_cpu(evt));
}

void opal_notifier_disable(void)
{
	atomic_set(&opal_notifier_hold, 1);
}

/*
 * Opal message notifier based on message type. Allow subscribers to get
 * notified for a specific message type.
 */
int opal_message_notifier_register(enum OpalMessageType msg_type,
					struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}
	if (msg_type >= OPAL_MSG_TYPE_MAX) {
		pr_warning("%s: Invalid message type argument (%d)\n",
			   __func__, msg_type);
		return -EINVAL;
	}
	return atomic_notifier_chain_register(
				&opal_msg_notifier_head[msg_type], nb);
}

static void opal_message_do_notify(uint32_t msg_type, void *msg)
{
	/* notify subscribers */
	atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
					msg_type, msg);
}

static void opal_handle_message(void)
{
	s64 ret;
	/*
	 * TODO: pre-allocate a message buffer depending on opal-msg-size
	 * value in /proc/device-tree.
	 */
	static struct opal_msg msg;
	u32 type;

	ret = opal_get_msg(__pa(&msg), sizeof(msg));
	/* No opal message pending. */
	if (ret == OPAL_RESOURCE)
		return;

	/* check for errors. */
	if (ret) {
		pr_warning("%s: Failed to retrieve opal message, err=%lld\n",
				__func__, ret);
		return;
	}

	type = be32_to_cpu(msg.msg_type);

	/* Sanity check */
	if (type >= OPAL_MSG_TYPE_MAX) {
		pr_warning("%s: Unknown message type: %u\n", __func__, type);
		return;
	}
	opal_message_do_notify(type, (void *)&msg);
}

static int opal_message_notify(struct notifier_block *nb,
			  unsigned long events, void *change)
{
	if (events & OPAL_EVENT_MSG_PENDING)
		opal_handle_message();
	return 0;
}

static struct notifier_block opal_message_nb = {
	.notifier_call	= opal_message_notify,
	.next		= NULL,
	.priority	= 0,
};

static int __init opal_message_init(void)
{
	int ret, i;

	for (i = 0; i < OPAL_MSG_TYPE_MAX; i++)
		ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]);

	ret = opal_notifier_register(&opal_message_nb);
	if (ret) {
		pr_err("%s: Can't register OPAL event notifier (%d)\n",
		       __func__, ret);
		return ret;
	}
	return 0;
}
machine_early_initcall(powernv, opal_message_init);

int opal_get_chars(uint32_t vtermno, char *buf, int count)
{
	s64 rc;
	__be64 evt, len;

	if (!opal.entry)
		return -ENODEV;
	opal_poll_events(&evt);
	if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
		return 0;
	len = cpu_to_be64(count);
	rc = opal_console_read(vtermno, &len, buf);
	if (rc == OPAL_SUCCESS)
		return be64_to_cpu(len);
	return 0;
}

int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
{
	int written = 0;
	__be64 olen;
	s64 len, rc;
	unsigned long flags;
	__be64 evt;

	if (!opal.entry)
		return -ENODEV;

	/* We want put_chars to be atomic to avoid mangling of hvsi
	 * packets. To do that, we first test for room and return
	 * -EAGAIN if there isn't enough.
	 *
	 * Unfortunately, opal_console_write_buffer_space() doesn't
	 * appear to work on opal v1, so we just assume there is
	 * enough room and be done with it
	 */
	spin_lock_irqsave(&opal_write_lock, flags);
	if (firmware_has_feature(FW_FEATURE_OPALv2)) {
		rc = opal_console_write_buffer_space(vtermno, &olen);
		len = be64_to_cpu(olen);
		if (rc || len < total_len) {
			spin_unlock_irqrestore(&opal_write_lock, flags);
			/* Closed -> drop characters */
			if (rc)
				return total_len;
			opal_poll_events(NULL);
			return -EAGAIN;
		}
	}

	/* We still try to handle partial completions, though they
	 * should no longer happen.
	 */
	rc = OPAL_BUSY;
	while (total_len > 0 && (rc == OPAL_BUSY ||
				rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
		olen = cpu_to_be64(total_len);
		rc = opal_console_write(vtermno, &olen, data);
		len = be64_to_cpu(olen);

		/* Closed or other error drop */
		if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
		    rc != OPAL_BUSY_EVENT) {
			written = total_len;
			break;
		}
		if (rc == OPAL_SUCCESS) {
			total_len -= len;
			data += len;
			written += len;
		}
		/* This is a bit nasty but we need that for the console to
		 * flush when there aren't any interrupts. We will clean
		 * things up a bit later to limit that to synchronous paths
		 * such as the kernel console and xmon/udbg
		 */
		do
			opal_poll_events(&evt);
		while (rc == OPAL_SUCCESS &&
			(be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
	}
	spin_unlock_irqrestore(&opal_write_lock, flags);
	return written;
}

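/*
 * Try to recover from a machine check exception. Returns 1 if the error
 * was recovered (the platform corrected it, or the affected user task
 * could be killed), 0 otherwise.
 */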
static int opal_recover_mce(struct pt_regs *regs,
					struct machine_check_event *evt)
{
	int recovered = 0;
	uint64_t ea = get_mce_fault_addr(evt);

	if (!(regs->msr & MSR_RI)) {
		/* If MSR_RI isn't set, we cannot recover */
		recovered = 0;
	} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
		/* Platform corrected itself */
		recovered = 1;
	} else if (ea && !is_kernel_addr(ea)) {
		/*
		 * Faulting address is not in kernel text. We should be fine.
		 * We need to find which process uses this address.
		 * For now, kill the task if we have received exception when
		 * in userspace.
		 *
		 * TODO: Queue up this address for hwpoisoning later.
		 */
		if (user_mode(regs) && !is_global_init(current)) {
			_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
			recovered = 1;
		} else
			recovered = 0;
	} else if (user_mode(regs) && !is_global_init(current) &&
		evt->severity == MCE_SEV_ERROR_SYNC) {
		/*
		 * If we have received a synchronous error when in userspace
		 * kill the task.
		 */
		_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
		recovered = 1;
	}
	return recovered;
}

int opal_machine_check(struct pt_regs *regs)
{
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return 0;

	/* Print things out */
	if (evt.version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d !\n",
		       evt.version);
		return 0;
	}
	machine_check_print_event_info(&evt);

	if (opal_recover_mce(regs, &evt))
		return 1;
	return 0;
}

/* Early hmi handler called in real mode. */
int opal_hmi_exception_early(struct pt_regs *regs)
{
	s64 rc;

	/*
	 * Call the OPAL HMI handler. A return value of OPAL_SUCCESS
	 * indicates that an HMI event has been generated and is waiting
	 * to be pulled by Linux.
	 */
	rc = opal_handle_hmi();
	if (rc == OPAL_SUCCESS) {
		local_paca->hmi_event_available = 1;
		return 1;
	}
	return 0;
}

/* HMI exception handler called in virtual mode during check_irq_replay. */
int opal_handle_hmi_exception(struct pt_regs *regs)
{
	s64 rc;
	__be64 evt = 0;

	/*
	 * Check if an HMI event is available. If yes, call
	 * opal_poll_events() to pull OPAL messages and process them.
	 */
	if (!local_paca->hmi_event_available)
		return 0;

	local_paca->hmi_event_available = 0;
	rc = opal_poll_events(&evt);
	if (rc == OPAL_SUCCESS && evt)
		opal_do_notifier(be64_to_cpu(evt));

	return 1;
}

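/*
 * Look up the fixup (recovery) address for an instruction address that
 * falls inside one of the firmware-provided recoverable ranges.
 */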
static uint64_t find_recovery_address(uint64_t nip)
{
	int i;

	for (i = 0; i < mc_recoverable_range_len; i++)
		if ((nip >= mc_recoverable_range[i].start_addr) &&
		    (nip < mc_recoverable_range[i].end_addr))
		    return mc_recoverable_range[i].recover_addr;
	return 0;
}

bool opal_mce_check_early_recovery(struct pt_regs *regs)
{
	uint64_t recover_addr = 0;

	if (!opal.base || !opal.size)
		goto out;

	if ((regs->nip >= opal.base) &&
			(regs->nip <= (opal.base + opal.size)))
		recover_addr = find_recovery_address(regs->nip);

	/*
	 * Setup regs->nip to rfi into fixup address.
	 */
	if (recover_addr)
		regs->nip = recover_addr;

out:
	return !!recover_addr;
}

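/*
 * OPAL platform interrupt handler: let OPAL handle the interrupt and
 * run the notifier chain for any events it reports.
 */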
static irqreturn_t opal_interrupt(int irq, void *data)
{
	__be64 events;

	opal_handle_interrupt(virq_to_hw(irq), &events);

	opal_do_notifier(be64_to_cpu(events));

	return IRQ_HANDLED;
}

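/* Create the "opal" kobject under /sys/firmware */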
static int opal_sysfs_init(void)
{
	opal_kobj = kobject_create_and_add("opal", firmware_kobj);
	if (!opal_kobj) {
		pr_warn("kobject_create_and_add opal failed\n");
		return -ENOMEM;
	}

	return 0;
}

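/*
 * Export the OPAL firmware symbol map (advertised in the device tree)
 * as a read-only binary attribute under /sys/firmware/opal.
 */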
static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	return memory_read_from_buffer(buf, count, &off, bin_attr->private,
				       bin_attr->size);
}

static BIN_ATTR_RO(symbol_map, 0);

static void opal_export_symmap(void)
{
	const __be64 *syms;
	unsigned int size;
	struct device_node *fw;
	int rc;

	fw = of_find_node_by_path("/ibm,opal/firmware");
	if (!fw)
		return;
	syms = of_get_property(fw, "symbol-map", &size);
	if (!syms || size != 2 * sizeof(__be64))
		return;

	/* Setup attributes */
	bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0]));
	bin_attr_symbol_map.size = be64_to_cpu(syms[1]);

	rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map);
	if (rc)
		pr_warn("Error %d creating OPAL symbols file\n", rc);
}

static void __init opal_dump_region_init(void)
{
	void *addr;
	uint64_t size;
	int rc;

	/* Register kernel log buffer */
	addr = log_buf_addr_get();
	if (addr == NULL)
		return;

	size = log_buf_len_get();
	if (size == 0)
		return;

	rc = opal_register_dump_region(OPAL_DUMP_REGION_LOG_BUF,
				       __pa(addr), size);
	/* Don't warn if this is just an older OPAL that doesn't
	 * know about that call
	 */
	if (rc && rc != OPAL_UNSUPPORTED)
		pr_warn("DUMP: Failed to register kernel log buffer. "
			"rc = %d\n", rc);
}

static void opal_ipmi_init(struct device_node *opal_node)
{
	struct device_node *np;

	for_each_child_of_node(opal_node, np)
		if (of_device_is_compatible(np, "ibm,opal-ipmi"))
			of_platform_device_create(np, NULL, NULL);
}

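/* Create platform devices for the OPAL I2C busses listed in the device tree */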
static void opal_i2c_create_devs(void)
{
	struct device_node *np;

	for_each_compatible_node(np, NULL, "ibm,opal-i2c")
		of_platform_device_create(np, NULL, NULL);
}

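/* Map and request all interrupts that firmware has reserved for OPAL */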
static void __init opal_irq_init(struct device_node *dn)
{
	const __be32 *irqs;
	int i, irqlen;

	/* Get interrupt property */
	irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
	opal_irq_count = irqs ? (irqlen / 4) : 0;
	pr_debug("Found %d interrupts reserved for OPAL\n", opal_irq_count);
	if (!opal_irq_count)
		return;

	/* Install interrupt handlers */
	opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
	for (i = 0; irqs && i < opal_irq_count; i++, irqs++) {
		unsigned int irq, virq;
		int rc;

		/* Get hardware and virtual IRQ */
		irq = be32_to_cpup(irqs);
		virq = irq_create_mapping(NULL, irq);
		if (virq == NO_IRQ) {
			pr_warn("Failed to map irq 0x%x\n", irq);
			continue;
		}

		/* Install interrupt handler */
		rc = request_irq(virq, opal_interrupt, 0, "opal", NULL);
		if (rc) {
			irq_dispose_mapping(virq);
			pr_warn("Error %d requesting irq %d (0x%x)\n",
				 rc, virq, irq);
			continue;
		}

		/* Cache IRQ */
		opal_irqs[i] = virq;
	}
}

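/* Background thread that polls OPAL for events at the heartbeat interval */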
static int kopald(void *unused)
{
	set_freezable();
	do {
		try_to_freeze();
		opal_poll_events(NULL);
		msleep_interruptible(opal_heartbeat);
	} while (!kthread_should_stop());

	return 0;
}

static void opal_init_heartbeat(void)
{
	/* Old firmware, we assume the HVC heartbeat is sufficient */
	if (of_property_read_u32(opal_node, "ibm,heartbeat-ms",
				 &opal_heartbeat) != 0)
		opal_heartbeat = 0;

	if (opal_heartbeat)
		kthread_run(kopald, NULL, "kopald");
}

static int __init opal_init(void)
{
	struct device_node *np, *consoles;
	int rc;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_warn("Device node not found\n");
		return -ENODEV;
	}

	/* Register OPAL consoles if any ports */
	if (firmware_has_feature(FW_FEATURE_OPALv2))
		consoles = of_find_node_by_path("/ibm,opal/consoles");
	else
		consoles = of_node_get(opal_node);
	if (consoles) {
		for_each_child_of_node(consoles, np) {
			if (strcmp(np->name, "serial"))
				continue;
			of_platform_device_create(np, NULL, NULL);
		}
		of_node_put(consoles);
	}

	/* Create i2c platform devices */
	opal_i2c_create_devs();

	/* Set up a heartbeat thread if requested by OPAL */
	opal_init_heartbeat();

	/* Find all OPAL interrupts and request them */
	opal_irq_init(opal_node);

	/* Create "opal" kobject under /sys/firmware */
	rc = opal_sysfs_init();
	if (rc == 0) {
		/* Export symbol map to userspace */
		opal_export_symmap();
		/* Setup dump region interface */
		opal_dump_region_init();
		/* Setup error log interface */
		rc = opal_elog_init();
		/* Setup code update interface */
		opal_flash_init();
		/* Setup platform dump extract interface */
		opal_platform_dump_init();
		/* Setup system parameters interface */
		opal_sys_param_init();
		/* Setup message log interface. */
		opal_msglog_init();
	}

	/* Initialize OPAL IPMI backend */
	opal_ipmi_init(opal_node);

	return 0;
}
machine_subsys_initcall(powernv, opal_init);

void opal_shutdown(void)
{
	unsigned int i;
	long rc = OPAL_BUSY;

	/* First free interrupts, which will also mask them */
	for (i = 0; i < opal_irq_count; i++) {
		if (opal_irqs[i])
			free_irq(opal_irqs[i], NULL);
		opal_irqs[i] = 0;
	}

	/*
	 * Then sync with OPAL, which ensures that anything that can
	 * potentially write to our memory has completed, such as an
	 * ongoing dump retrieval.
	 */
	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_sync_host_reboot();
		if (rc == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);
		else
			mdelay(10);
	}

	/* Unregister memory dump region */
	opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
}

/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);
EXPORT_SYMBOL_GPL(opal_ipmi_send);
EXPORT_SYMBOL_GPL(opal_ipmi_recv);

/* Convert a region of vmalloc memory to an opal sg list */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
					     unsigned long vmalloc_size)
{
	struct opal_sg_list *sg, *first = NULL;
	unsigned long i = 0;

	sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sg)
		goto nomem;

	first = sg;

	while (vmalloc_size > 0) {
		uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
		uint64_t length = min(vmalloc_size, PAGE_SIZE);

		sg->entry[i].data = cpu_to_be64(data);
		sg->entry[i].length = cpu_to_be64(length);
		i++;

		if (i >= SG_ENTRIES_PER_NODE) {
			struct opal_sg_list *next;

			next = kzalloc(PAGE_SIZE, GFP_KERNEL);
			if (!next)
				goto nomem;

			sg->length = cpu_to_be64(
					i * sizeof(struct opal_sg_entry) + 16);
			i = 0;
			sg->next = cpu_to_be64(__pa(next));
			sg = next;
		}

		vmalloc_addr += length;
		vmalloc_size -= length;
	}

	sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);

	return first;

nomem:
	pr_err("%s : Failed to allocate memory\n", __func__);
	opal_free_sg_list(first);
	return NULL;
}

void opal_free_sg_list(struct opal_sg_list *sg)
{
	while (sg) {
		uint64_t next = be64_to_cpu(sg->next);

		kfree(sg);

		if (next)
			sg = __va(next);
		else
			sg = NULL;
	}
}

EXPORT_SYMBOL_GPL(opal_poll_events);
EXPORT_SYMBOL_GPL(opal_rtc_read);
EXPORT_SYMBOL_GPL(opal_rtc_write);
EXPORT_SYMBOL_GPL(opal_tpo_read);
EXPORT_SYMBOL_GPL(opal_tpo_write);
EXPORT_SYMBOL_GPL(opal_i2c_request);