/*
 *  64-bit pSeries and RS/6000 setup code.
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Adapted from 'alpha' version by Gary Thomas
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  Modified by PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * bootup setup stuff..
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/major.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/adb.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/of.h>
#include <linux/of_pci.h>

#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/time.h>
#include <asm/nvram.h>
#include <asm/pmc.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/ppc-pci.h>
#include <asm/i8259.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/eeh.h>
#include <asm/reg.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/isa-bridge.h>
#include <asm/security_features.h>

#include "pseries.h"

int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
EXPORT_SYMBOL(CMO_PageSize);

int fwnmi_active;  /* TRUE if an FWNMI handler is present */

static void pSeries_show_cpuinfo(struct seq_file *m)
{
	struct device_node *root;
	const char *model = "";

	root = of_find_node_by_path("/");
	if (root)
		model = of_get_property(root, "model", NULL);
	seq_printf(m, "machine\t\t: CHRP %s\n", model);
	of_node_put(root);
	if (radix_enabled())
		seq_printf(m, "MMU\t\t: Radix\n");
	else
		seq_printf(m, "MMU\t\t: Hash\n");
}

/* Initialize firmware assisted non-maskable interrupts if
 * the firmware supports this feature.
 */
static void __init fwnmi_init(void)
{
	unsigned long system_reset_addr, machine_check_addr;

	int ibm_nmi_register = rtas_token("ibm,nmi-register");
	if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
		return;

	/* If the kernel's not linked at zero we point the firmware at low
	 * addresses anyway, and use a trampoline to get to the real code. */
	system_reset_addr  = __pa(system_reset_fwnmi) - PHYSICAL_START;
	machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START;

	if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr,
				machine_check_addr))
		fwnmi_active = 1;
}

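/*
 * Chained handler for the legacy ISA 8259 cascade: fetch the pending
 * interrupt from the 8259, hand it to the generic IRQ layer, then EOI
 * the cascade interrupt on the parent (XICS/XIVE) controller.
 */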
static void pseries_8259_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq = i8259_irq();

	if (cascade_irq)
		generic_handle_irq(cascade_irq);

	chip->irq_eoi(&desc->irq_data);
}

static void __init pseries_setup_i8259_cascade(void)
{
	struct device_node *np, *old, *found = NULL;
	unsigned int cascade;
	const u32 *addrp;
	unsigned long intack = 0;
	int naddr;

	for_each_node_by_type(np, "interrupt-controller") {
		if (of_device_is_compatible(np, "chrp,iic")) {
			found = np;
			break;
		}
	}

	if (found == NULL) {
		printk(KERN_DEBUG "pic: no ISA interrupt controller\n");
		return;
	}

	cascade = irq_of_parse_and_map(found, 0);
	if (!cascade) {
		printk(KERN_ERR "pic: failed to map cascade interrupt");
		return;
	}
	pr_debug("pic: cascade mapped to irq %d\n", cascade);

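	/*
	 * Walk up from the interrupt controller node looking for a parent
	 * PCI bridge that carries the "8259-interrupt-acknowledge"
	 * property, which gives the address used to ack the 8259.
	 */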
	for (old = of_node_get(found); old != NULL ; old = np) {
		np = of_get_parent(old);
		of_node_put(old);
		if (np == NULL)
			break;
		if (strcmp(np->name, "pci") != 0)
			continue;
		addrp = of_get_property(np, "8259-interrupt-acknowledge", NULL);
		if (addrp == NULL)
			continue;
		naddr = of_n_addr_cells(np);
		intack = addrp[naddr-1];
		if (naddr > 1)
			intack |= ((unsigned long)addrp[naddr-2]) << 32;
	}
	if (intack)
		printk(KERN_DEBUG "pic: PCI 8259 intack at 0x%016lx\n", intack);
	i8259_init(found, intack);
	of_node_put(found);
	irq_set_chained_handler(cascade, pseries_8259_cascade);
}

static void __init pseries_init_irq(void)
{
	/* Try using a XIVE if available, otherwise use a XICS */
	if (!xive_spapr_init()) {
		xics_init();
		pseries_setup_i8259_cascade();
	}
}

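/*
 * Ask the hypervisor (H_PERFMON) to enable the performance monitor
 * facility for this partition; the MSB of the "set" mask requests
 * enablement.
 */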
static void pseries_lpar_enable_pmcs(void)
{
	unsigned long set, reset;

	set = 1UL << 63;
	reset = 0;
	plpar_hcall_norets(H_PERFMON, set, reset);
}

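/*
 * Keep the pci_dn hierarchy in sync with device tree nodes that are
 * added or removed at runtime (e.g. by DLPAR), as reported through the
 * OF reconfig notifier chain.
 */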
static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	struct device_node *parent, *np = rd->dn;
	struct pci_dn *pdn;
	int err = NOTIFY_OK;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		parent = of_get_parent(np);
		pdn = parent ? PCI_DN(parent) : NULL;
		if (pdn)
			pci_add_device_node_info(pdn->phb, np);

		of_node_put(parent);
		break;
	case OF_RECONFIG_DETACH_NODE:
		pdn = PCI_DN(np);
		if (pdn)
			list_del(&pdn->list);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block pci_dn_reconfig_nb = {
	.notifier_call = pci_dn_reconfig_notifier,
};

struct kmem_cache *dtl_cache;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Allocate space for the dispatch trace log for all possible cpus
 * and register the buffers with the hypervisor.  This is used for
 * computing time stolen by the hypervisor.
 */
static int alloc_dispatch_logs(void)
{
	int cpu, ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	if (!dtl_cache)
		return 0;

	for_each_possible_cpu(cpu) {
		pp = &paca[cpu];
		dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
		if (!dtl) {
			pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
				cpu);
			pr_warn("Stolen time statistics will be unreliable\n");
			break;
		}

		pp->dtl_ridx = 0;
		pp->dispatch_log = dtl;
		pp->dispatch_log_end = dtl + N_DISPATCH_LOG;
		pp->dtl_curr = dtl;
	}

	/* Register the DTL for the current (boot) cpu */
	dtl = get_paca()->dispatch_log;
	get_paca()->dtl_ridx = 0;
	get_paca()->dtl_curr = dtl;
	get_paca()->lppaca_ptr->dtl_idx = 0;

	/* hypervisor reads buffer length from this field */
	dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
	ret = register_dtl(hard_smp_processor_id(), __pa(dtl));
	if (ret)
		pr_err("WARNING: DTL registration of cpu %d (hw %d) failed "
		       "with %d\n", smp_processor_id(),
		       hard_smp_processor_id(), ret);
	get_paca()->lppaca_ptr->dtl_enable_mask = 2;

	return 0;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline int alloc_dispatch_logs(void)
{
	return 0;
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

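/*
 * Back the per-cpu dispatch trace logs with a kmem cache whose objects
 * are DISPATCH_LOG_BYTES-sized and -aligned, so a buffer never
 * straddles that boundary.
 */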
static int alloc_dispatch_log_kmem_cache(void)
{
	dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
						DISPATCH_LOG_BYTES, 0, NULL);
	if (!dtl_cache) {
		pr_warn("Failed to create dispatch trace log buffer cache\n");
		pr_warn("Stolen time statistics will be unreliable\n");
		return 0;
	}

	return alloc_dispatch_logs();
}
machine_early_initcall(pseries, alloc_dispatch_log_kmem_cache);

static void pseries_lpar_idle(void)
{
	/*
	 * Default handler to go into low thread priority and possibly
	 * low power mode by ceding processor to hypervisor
	 */

	/* Indicate to hypervisor that we are idle. */
	get_lppaca()->idle = 1;

	/*
	 * Yield the processor to the hypervisor.  We return if
	 * an external interrupt occurs (which are driven prior
	 * to returning here) or if a prod occurs from another
	 * processor. When returning here, external interrupts
	 * are enabled.
	 */
	cede_processor();

	get_lppaca()->idle = 0;
}

/*
 * Enable relocation on during exceptions. This has partition wide scope and
 * may take a while to complete, if it takes longer than one second we will
 * just give up rather than wasting any more time on this - if that turns out
 * to ever be a problem in practice we can move this into a kernel thread to
 * finish off the process later in boot.
 */
void pseries_enable_reloc_on_exc(void)
{
	long rc;
	unsigned int delay, total_delay = 0;

	while (1) {
		rc = enable_reloc_on_exceptions();
		if (!H_IS_LONG_BUSY(rc)) {
			if (rc == H_P2) {
				pr_info("Relocation on exceptions not"
					" supported\n");
			} else if (rc != H_SUCCESS) {
				pr_warn("Unable to enable relocation"
					" on exceptions: %ld\n", rc);
			}
			break;
		}

		delay = get_longbusy_msecs(rc);
		total_delay += delay;
		if (total_delay > 1000) {
			pr_warn("Warning: Giving up waiting to enable "
				"relocation on exceptions (%u msec)!\n",
				total_delay);
			return;
		}

		mdelay(delay);
	}
}
EXPORT_SYMBOL(pseries_enable_reloc_on_exc);

void pseries_disable_reloc_on_exc(void)
{
	long rc;

	while (1) {
		rc = disable_reloc_on_exceptions();
		if (!H_IS_LONG_BUSY(rc))
			break;
		mdelay(get_longbusy_msecs(rc));
	}
	if (rc != H_SUCCESS)
		pr_warn("Warning: Failed to disable relocation on exceptions: %ld\n",
			rc);
}
EXPORT_SYMBOL(pseries_disable_reloc_on_exc);

#ifdef CONFIG_KEXEC_CORE
static void pSeries_machine_kexec(struct kimage *image)
{
	if (firmware_has_feature(FW_FEATURE_SET_MODE))
		pseries_disable_reloc_on_exc();

	default_machine_kexec(image);
}
#endif

#ifdef __LITTLE_ENDIAN__
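/*
 * Ask the hypervisor (H_SET_MODE) to deliver exceptions in big endian;
 * used on the kexec/kdump path when leaving a little endian kernel.
 */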
void pseries_big_endian_exceptions(void)
{
	long rc;

	while (1) {
		rc = enable_big_endian_exceptions();
		if (!H_IS_LONG_BUSY(rc))
			break;
		mdelay(get_longbusy_msecs(rc));
	}

	/*
	 * At this point it is unlikely panic() will get anything
	 * out to the user, since this is called very late in kexec
	 * but at least this will stop us from continuing on further
	 * and creating an even more difficult to debug situation.
	 *
	 * There is a known problem when kdump'ing, if cpus are offline
	 * the above call will fail. Rather than panicking again, keep
	 * going and hope the kdump kernel is also little endian, which
	 * it usually is.
	 */
	if (rc && !kdump_in_progress())
		panic("Could not enable big endian exceptions");
}

void pseries_little_endian_exceptions(void)
{
	long rc;

	while (1) {
		rc = enable_little_endian_exceptions();
		if (!H_IS_LONG_BUSY(rc))
			break;
		mdelay(get_longbusy_msecs(rc));
	}
	if (rc) {
		ppc_md.progress("H_SET_MODE LE exception fail", 0);
		panic("Could not enable little endian exceptions");
	}
}
#endif

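/*
 * Scan the children of the device tree root for PCI host bridges
 * ("pci"/"pciex" nodes) and set up a pci_controller for each one.
 */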
static void __init find_and_init_phbs(void)
{
	struct device_node *node;
	struct pci_controller *phb;
	struct device_node *root = of_find_node_by_path("/");

	for_each_child_of_node(root, node) {
		if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
					   strcmp(node->type, "pciex") != 0))
			continue;

		phb = pcibios_alloc_controller(node);
		if (!phb)
			continue;
		rtas_setup_phb(phb);
		pci_process_bridge_OF_ranges(phb, node, 0);
		isa_bridge_find_early(phb);
		phb->controller_ops = pseries_pci_controller_ops;
	}

	of_node_put(root);

	/*
	 * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
	 * in chosen.
	 */
	of_pci_check_probe_only();
}

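/*
 * Translate the result of H_GET_CPU_CHARACTERISTICS into the kernel's
 * generic security feature flags.
 */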
static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
{
	if (result->character & H_CPU_CHAR_SPEC_BAR_ORI31)
		security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);

	if (result->character & H_CPU_CHAR_BCCTRL_SERIALISED)
		security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);

	if (result->character & H_CPU_CHAR_L1D_FLUSH_ORI30)
		security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);

	if (result->character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
		security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);

	if (result->character & H_CPU_CHAR_L1D_THREAD_PRIV)
		security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);

	if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
		security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);

	/*
	 * The features below are enabled by default, so we instead look to see
	 * if firmware has *disabled* them, and clear them if so.
	 */
	if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
		security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);

	if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
		security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);

	if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
		security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
}

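/*
 * Query the hypervisor's CPU characteristics (H_GET_CPU_CHARACTERISTICS)
 * and use the reported flags to pick the L1D flush type used on kernel
 * entry/exit.
 */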
void pseries_setup_rfi_flush(void)
{
	struct h_cpu_char_result result;
	enum l1d_flush_type types;
	bool enable;
	long rc;

	rc = plpar_get_cpu_characteristics(&result);
	if (rc == H_SUCCESS)
		init_cpu_char_feature_flags(&result);

	/*
	 * We're the guest so this doesn't apply to us, clear it to simplify
	 * handling of it elsewhere.
	 */
	security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);

	types = L1D_FLUSH_FALLBACK;

	if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
		types |= L1D_FLUSH_MTTRIG;

	if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
		types |= L1D_FLUSH_ORI;

	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);

	setup_rfi_flush(types, enable);
}

#ifdef CONFIG_PCI_IOV
enum rtas_iov_fw_value_map {
	NUM_RES_PROPERTY  = 0, /* Number of Resources */
	LOW_INT           = 1, /* Lowest 32 bits of Address */
	START_OF_ENTRIES  = 2, /* Always start of entry */
	APERTURE_PROPERTY = 2, /* Start of entry+ to  Aperture Size */
	WDW_SIZE_PROPERTY = 4, /* Start of entry+ to Window Size */
	NEXT_ENTRY        = 7  /* Go to next entry on array */
};

enum get_iov_fw_value_index {
	BAR_ADDRS     = 1,    /*  Get Bar Address */
	APERTURE_SIZE = 2,    /*  Get Aperture Size */
	WDW_SIZE      = 3     /*  Get Window Size */
};

resource_size_t pseries_get_iov_fw_value(struct pci_dev *dev, int resno,
					 enum get_iov_fw_value_index value)
{
	const int *indexes;
	struct device_node *dn = pci_device_to_OF_node(dev);
	int i, num_res, ret = 0;

	indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
	if (!indexes)
		return  0;

	/*
	 * First element in the array is the number of Bars
	 * returned.  Search through the list to find the matching
	 * bar
	 */
	num_res = of_read_number(&indexes[NUM_RES_PROPERTY], 1);
	if (resno >= num_res)
		return 0; /* or an error */

	i = START_OF_ENTRIES + NEXT_ENTRY * resno;
	switch (value) {
	case BAR_ADDRS:
		ret = of_read_number(&indexes[i], 2);
		break;
	case APERTURE_SIZE:
		ret = of_read_number(&indexes[i + APERTURE_PROPERTY], 2);
		break;
	case WDW_SIZE:
		ret = of_read_number(&indexes[i + WDW_SIZE_PROPERTY], 2);
		break;
	}

	return ret;
}

void of_pci_set_vf_bar_size(struct pci_dev *dev, const int *indexes)
{
	struct resource *res;
	resource_size_t base, size;
	int i, r, num_res;

	num_res = of_read_number(&indexes[NUM_RES_PROPERTY], 1);
	num_res = min_t(int, num_res, PCI_SRIOV_NUM_BARS);
	for (i = START_OF_ENTRIES, r = 0; r < num_res && r < PCI_SRIOV_NUM_BARS;
	     i += NEXT_ENTRY, r++) {
		res = &dev->resource[r + PCI_IOV_RESOURCES];
		base = of_read_number(&indexes[i], 2);
		size = of_read_number(&indexes[i + APERTURE_PROPERTY], 2);
		res->flags = pci_parse_of_flags(of_read_number
						(&indexes[i + LOW_INT], 1), 0);
		res->flags |= (IORESOURCE_MEM_64 | IORESOURCE_PCI_FIXED);
		res->name = pci_name(dev);
		res->start = base;
		res->end = base + size - 1;
	}
}

void of_pci_parse_iov_addrs(struct pci_dev *dev, const int *indexes)
{
	struct resource *res, *root, *conflict;
	resource_size_t base, size;
	int i, r, num_res;

	/*
	 * First element in the array is the number of Bars
	 * returned.  Search through the list to find the matching
	 * bars and assign them from firmware into the resource structures.
	 */
	num_res = of_read_number(&indexes[NUM_RES_PROPERTY], 1);
	for (i = START_OF_ENTRIES, r = 0; r < num_res && r < PCI_SRIOV_NUM_BARS;
	     i += NEXT_ENTRY, r++) {
		res = &dev->resource[r + PCI_IOV_RESOURCES];
		base = of_read_number(&indexes[i], 2);
		size = of_read_number(&indexes[i + WDW_SIZE_PROPERTY], 2);
		res->name = pci_name(dev);
		res->start = base;
		res->end = base + size - 1;
		root = &iomem_resource;
		dev_dbg(&dev->dev,
			"pSeries IOV BAR %d: trying firmware assignment %pR\n",
			 r + PCI_IOV_RESOURCES, res);
		conflict = request_resource_conflict(root, res);
		if (conflict) {
			dev_info(&dev->dev,
				 "BAR %d: %pR conflicts with %s %pR\n",
				 r + PCI_IOV_RESOURCES, res,
				 conflict->name, conflict);
			res->flags |= IORESOURCE_UNSET;
		}
	}
}

static void pseries_pci_fixup_resources(struct pci_dev *pdev)
{
	const int *indexes;
	struct device_node *dn = pci_device_to_OF_node(pdev);

	/* Firmware must support open sriov otherwise don't configure */
	indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
	if (!indexes)
		return;
	/* Assign the addresses from device tree */
	of_pci_set_vf_bar_size(pdev, indexes);
}

static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
{
	const int *indexes;
	struct device_node *dn = pci_device_to_OF_node(pdev);

	if (!pdev->is_physfn || pdev->is_added)
		return;
	/* Firmware must support open sriov otherwise don't configure */
	indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
	if (!indexes)
		return;
	/* Assign the addresses from device tree */
	of_pci_parse_iov_addrs(pdev, indexes);
}

static resource_size_t pseries_pci_iov_resource_alignment(struct pci_dev *pdev,
							  int resno)
{
	const __be32 *reg;
	struct device_node *dn = pci_device_to_OF_node(pdev);

	/* Firmware must support open sriov otherwise report regular alignment */
	reg = of_get_property(dn, "ibm,is-open-sriov-pf", NULL);
	if (!reg)
		return pci_iov_resource_size(pdev, resno);

	if (!pdev->is_physfn)
		return 0;
	return pseries_get_iov_fw_value(pdev,
					resno - PCI_IOV_RESOURCES,
					APERTURE_SIZE);
}
#endif

static void __init pSeries_setup_arch(void)
{
	set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);

	/* Discover PIC type and setup ppc_md accordingly */
	smp_init_pseries();


	/* openpic global configuration register (64-bit format). */
	/* openpic Interrupt Source Unit pointer (64-bit format). */
	/* python0 facility area (mmio) (64-bit format) REAL address. */

	/* init to some ~sane value until calibrate_delay() runs */
	loops_per_jiffy = 50000000;

	fwnmi_init();

	pseries_setup_rfi_flush();

	/* By default, only probe PCI (can be overridden by rtas_pci) */
	pci_add_flags(PCI_PROBE_ONLY);

	/* Find and initialize PCI host bridges */
	init_pci_config_tokens();
	find_and_init_phbs();
	of_reconfig_notifier_register(&pci_dn_reconfig_nb);

	pSeries_nvram_init();

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		vpa_init(boot_cpuid);
		ppc_md.power_save = pseries_lpar_idle;
		ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
#ifdef CONFIG_PCI_IOV
		ppc_md.pcibios_fixup_resources =
			pseries_pci_fixup_resources;
		ppc_md.pcibios_fixup_sriov =
			pseries_pci_fixup_iov_resources;
		ppc_md.pcibios_iov_resource_alignment =
			pseries_pci_iov_resource_alignment;
#endif
	} else {
		/* No special idle routine */
		ppc_md.enable_pmcs = power4_enable_pmcs;
	}

	ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
}

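/*
 * On panic, flush the remaining kernel messages to the console and then
 * report the termination (and its reason string) to firmware via
 * ibm,os-term.
 */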
static void pseries_panic(char *str)
{
	panic_flush_kmsg_end();
	rtas_os_term(str);
}

static int __init pSeries_init_panel(void)
{
	/* Manually leave the kernel version on the panel. */
#ifdef __BIG_ENDIAN__
	ppc_md.progress("Linux ppc64\n", 0);
#else
	ppc_md.progress("Linux ppc64le\n", 0);
#endif
	ppc_md.progress(init_utsname()->version, 0);

	return 0;
}
machine_arch_initcall(pseries, pSeries_init_panel);

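/*
 * Hypervisor-mediated hardware breakpoint setup: H_SET_DABR/H_SET_XDABR
 * on older firmware, DAWR via plpar_set_watchpoint0() where the firmware
 * supports H_SET_MODE (see pseries_init() below).
 */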
static int pseries_set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return plpar_hcall_norets(H_SET_DABR, dabr);
}

static int pseries_set_xdabr(unsigned long dabr, unsigned long dabrx)
{
	/* Have to set at least one bit in the DABRX according to PAPR */
	if (dabrx == 0 && dabr == 0)
		dabrx = DABRX_USER;
	/* PAPR says we can only set kernel and user bits */
	dabrx &= DABRX_KERNEL | DABRX_USER;

	return plpar_hcall_norets(H_SET_XDABR, dabr, dabrx);
}

static int pseries_set_dawr(unsigned long dawr, unsigned long dawrx)
{
	/* PAPR says we can't set HYP */
	dawrx &= ~DAWRX_HYP;

	return  plpar_set_watchpoint0(dawr, dawrx);
}

#define CMO_CHARACTERISTICS_TOKEN 44
#define CMO_MAXLENGTH 1026

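/*
 * Probe for the extended Cooperative Memory Overcommit statistics call
 * (h_get_mpp_x) and record its availability as FW_FEATURE_XCMO.
 */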
void pSeries_coalesce_init(void)
{
	struct hvcall_mpp_x_data mpp_x_data;

	if (firmware_has_feature(FW_FEATURE_CMO) && !h_get_mpp_x(&mpp_x_data))
		powerpc_firmware_features |= FW_FEATURE_XCMO;
	else
		powerpc_firmware_features &= ~FW_FEATURE_XCMO;
}

/**
 * pSeries_cmo_feature_init - FW_FEATURE_CMO is not stored in
 * ibm,hypertas-functions, so detect and handle it here.
 * (Stolen from parse_system_parameter_string)
 */
static void pSeries_cmo_feature_init(void)
{
	char *ptr, *key, *value, *end;
	int call_status;
	int page_order = IOMMU_PAGE_SHIFT_4K;

	pr_debug(" -> fw_cmo_feature_init()\n");
	spin_lock(&rtas_data_buf_lock);
	memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
	call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
				NULL,
				CMO_CHARACTERISTICS_TOKEN,
				__pa(rtas_data_buf),
				RTAS_DATA_BUF_SIZE);

	if (call_status != 0) {
		spin_unlock(&rtas_data_buf_lock);
		pr_debug("CMO not available\n");
		pr_debug(" <- fw_cmo_feature_init()\n");
		return;
	}

	end = rtas_data_buf + CMO_MAXLENGTH - 2;
	ptr = rtas_data_buf + 2;	/* step over strlen value */
	key = value = ptr;

	while (*ptr && (ptr <= end)) {
		/* Separate the key and value by replacing '=' with '\0' and
		 * point the value at the string after the '='
		 */
		if (ptr[0] == '=') {
			ptr[0] = '\0';
			value = ptr + 1;
		} else if (ptr[0] == '\0' || ptr[0] == ',') {
			/* Terminate the string containing the key/value pair */
			ptr[0] = '\0';

			if (key == value) {
				pr_debug("Malformed key/value pair\n");
				/* Never found a '=', end processing */
				break;
			}

			if (0 == strcmp(key, "CMOPageSize"))
				page_order = simple_strtol(value, NULL, 10);
			else if (0 == strcmp(key, "PrPSP"))
				CMO_PrPSP = simple_strtol(value, NULL, 10);
			else if (0 == strcmp(key, "SecPSP"))
				CMO_SecPSP = simple_strtol(value, NULL, 10);
			value = key = ptr + 1;
		}
		ptr++;
	}

	/* Page size is returned as the power of 2 of the page size,
	 * convert to the page size in bytes before returning
	 */
	CMO_PageSize = 1 << page_order;
	pr_debug("CMO_PageSize = %lu\n", CMO_PageSize);

	if (CMO_PrPSP != -1 || CMO_SecPSP != -1) {
		pr_info("CMO enabled\n");
		pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
		         CMO_SecPSP);
		powerpc_firmware_features |= FW_FEATURE_CMO;
		pSeries_coalesce_init();
	} else
		pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
		         CMO_SecPSP);
	spin_unlock(&rtas_data_buf_lock);
	pr_debug(" <- fw_cmo_feature_init()\n");
}

/*
 * Early initialization.  Relocation is on but do not reference unbolted pages
 */
static void __init pseries_init(void)
{
	pr_debug(" -> pseries_init()\n");

#ifdef CONFIG_HVC_CONSOLE
	if (firmware_has_feature(FW_FEATURE_LPAR))
		hvc_vio_init_early();
#endif
	if (firmware_has_feature(FW_FEATURE_XDABR))
		ppc_md.set_dabr = pseries_set_xdabr;
	else if (firmware_has_feature(FW_FEATURE_DABR))
		ppc_md.set_dabr = pseries_set_dabr;

	if (firmware_has_feature(FW_FEATURE_SET_MODE))
		ppc_md.set_dawr = pseries_set_dawr;

	pSeries_cmo_feature_init();
	iommu_init_early_pSeries();

	pr_debug(" <- pseries_init()\n");
}

/**
 * pseries_power_off - tell firmware about how to power off the system.
 *
 * This function calls either the power-off rtas token in normal cases
 * or the ibm,power-off-ups token (if present & requested) in case of
 * a power failure. If power-off token is used, power on will only be
 * possible with power button press. If ibm,power-off-ups token is used
 * it will allow auto poweron after power is restored.
 */
static void pseries_power_off(void)
{
	int rc;
	int rtas_poweroff_ups_token = rtas_token("ibm,power-off-ups");

	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_POWER_OFF);

	if (rtas_poweron_auto == 0 ||
		rtas_poweroff_ups_token == RTAS_UNKNOWN_SERVICE) {
		rc = rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1);
		printk(KERN_INFO "RTAS power-off returned %d\n", rc);
	} else {
		rc = rtas_call(rtas_poweroff_ups_token, 0, 1, NULL);
		printk(KERN_INFO "RTAS ibm,power-off-ups returned %d\n", rc);
	}
	for (;;);
}

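/*
 * Platform probe: claim the machine if the root node's device_type is
 * "chrp", excluding Cell blades whose firmware also claims "chrp".
 */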
static int __init pSeries_probe(void)
{
	const char *dtype = of_get_property(of_root, "device_type", NULL);

	if (dtype == NULL)
		return 0;
	if (strcmp(dtype, "chrp"))
		return 0;

	/* Cell blades firmware claims to be chrp while it's not. Until this
	 * is fixed, we need to avoid those here.
	 */
	if (of_machine_is_compatible("IBM,CPBW-1.0") ||
	    of_machine_is_compatible("IBM,CBEA"))
		return 0;

	pm_power_off = pseries_power_off;

	pr_debug("Machine is%s LPAR !\n",
	         (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");

	pseries_init();

	return 1;
}

static int pSeries_pci_probe_mode(struct pci_bus *bus)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		return PCI_PROBE_DEVTREE;
	return PCI_PROBE_NORMAL;
}

struct pci_controller_ops pseries_pci_controller_ops = {
	.probe_mode		= pSeries_pci_probe_mode,
};

define_machine(pseries) {
	.name			= "pSeries",
	.probe			= pSeries_probe,
	.setup_arch		= pSeries_setup_arch,
	.init_IRQ		= pseries_init_irq,
	.show_cpuinfo		= pSeries_show_cpuinfo,
	.log_error		= pSeries_log_error,
	.pcibios_fixup		= pSeries_final_fixup,
	.restart		= rtas_restart,
	.halt			= rtas_halt,
	.panic			= pseries_panic,
	.get_boot_time		= rtas_get_boot_time,
	.get_rtc_time		= rtas_get_rtc_time,
	.set_rtc_time		= rtas_set_rtc_time,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= rtas_progress,
	.system_reset_exception = pSeries_system_reset_exception,
	.machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC_CORE
	.machine_kexec          = pSeries_machine_kexec,
	.kexec_cpu_down         = pseries_kexec_cpu_down,
#endif
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
	.memory_block_size	= pseries_memory_block_size,
#endif
};