setup.c 25.9 KB
Newer Older
L
Linus Torvalds 已提交
1
/*
2
 *  64-bit pSeries and RS/6000 setup code.
L
Linus Torvalds 已提交
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Adapted from 'alpha' version by Gary Thomas
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  Modified by PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * bootup setup stuff..
 */

19
#include <linux/cpu.h>
L
Linus Torvalds 已提交
20 21 22 23 24 25 26 27 28 29 30 31 32 33 34
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/major.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/pci.h>
35
#include <linux/utsname.h>
L
Linus Torvalds 已提交
36
#include <linux/adb.h>
37
#include <linux/export.h>
L
Linus Torvalds 已提交
38 39 40 41
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
42
#include <linux/of.h>
43
#include <linux/of_pci.h>
L
Linus Torvalds 已提交
44 45 46 47 48 49 50 51 52 53 54 55 56 57

#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/time.h>
#include <asm/nvram.h>
58
#include <asm/pmc.h>
59
#include <asm/xics.h>
60
#include <asm/xive.h>
61
#include <asm/ppc-pci.h>
62 63
#include <asm/i8259.h>
#include <asm/udbg.h>
P
Paul Mackerras 已提交
64
#include <asm/smp.h>
65
#include <asm/firmware.h>
66
#include <asm/eeh.h>
67
#include <asm/reg.h>
68
#include <asm/plpar_wrappers.h>
69
#include <asm/kexec.h>
70
#include <asm/isa-bridge.h>
71
#include <asm/security_features.h>
72
#include <asm/asm-const.h>
L
Linus Torvalds 已提交
73

74
#include "pseries.h"
75

76 77
int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
78
unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
A
Andrew Morton 已提交
79
EXPORT_SYMBOL(CMO_PageSize);
L
Linus Torvalds 已提交
80 81 82

int fwnmi_active;  /* TRUE if an FWNMI handler is present */

83
static void pSeries_show_cpuinfo(struct seq_file *m)
L
Linus Torvalds 已提交
84 85 86 87 88 89
{
	struct device_node *root;
	const char *model = "";

	root = of_find_node_by_path("/");
	if (root)
90
		model = of_get_property(root, "model", NULL);
L
Linus Torvalds 已提交
91 92
	seq_printf(m, "machine\t\t: CHRP %s\n", model);
	of_node_put(root);
93 94 95 96
	if (radix_enabled())
		seq_printf(m, "MMU\t\t: Radix\n");
	else
		seq_printf(m, "MMU\t\t: Hash\n");
L
Linus Torvalds 已提交
97 98 99 100 101 102 103
}

/* Initialize firmware assisted non-maskable interrupts if
 * the firmware supports this feature.
 *
 * Registers the kernel's system-reset and machine-check entry points
 * with RTAS via the "ibm,nmi-register" call; sets fwnmi_active on
 * success so the exception code knows FWNMI is in use.
 */
static void __init fwnmi_init(void)
{
	unsigned long system_reset_addr, machine_check_addr;

	int ibm_nmi_register = rtas_token("ibm,nmi-register");
	if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
		return;

	/* If the kernel's not linked at zero we point the firmware at low
	 * addresses anyway, and use a trampoline to get to the real code. */
	system_reset_addr  = __pa(system_reset_fwnmi) - PHYSICAL_START;
	machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START;

	/* rtas_call() returns 0 on success */
	if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr,
				machine_check_addr))
		fwnmi_active = 1;
}

120
/*
 * Chained-interrupt handler for the legacy i8259 PIC cascaded behind
 * the XICS/XIVE controller: fetch the pending 8259 irq, dispatch it,
 * then EOI the cascade interrupt on the parent controller.
 */
static void pseries_8259_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq = i8259_irq();

	/* i8259_irq() returns 0 when there is nothing pending (spurious) */
	if (cascade_irq)
		generic_handle_irq(cascade_irq);

	chip->irq_eoi(&desc->irq_data);
}

131
/*
 * Locate the legacy i8259 interrupt controller in the device tree,
 * find the PCI host bridge's 8259 interrupt-acknowledge address (if
 * any), initialize the 8259 driver and hook its cascade interrupt.
 */
static void __init pseries_setup_i8259_cascade(void)
{
	struct device_node *np, *old, *found = NULL;
	unsigned int cascade;
	const u32 *addrp;
	unsigned long intack = 0;
	int naddr;

	/* The 8259 node is the interrupt-controller compatible "chrp,iic" */
	for_each_node_by_type(np, "interrupt-controller") {
		if (of_device_is_compatible(np, "chrp,iic")) {
			found = np;
			break;
		}
	}

	if (found == NULL) {
		printk(KERN_DEBUG "pic: no ISA interrupt controller\n");
		return;
	}

	cascade = irq_of_parse_and_map(found, 0);
	if (!cascade) {
		printk(KERN_ERR "pic: failed to map cascade interrupt");
		/* drop the reference taken by for_each_node_by_type() */
		of_node_put(found);
		return;
	}
	pr_debug("pic: cascade mapped to irq %d\n", cascade);

	/*
	 * Walk up from the 8259 node looking for a "pci" parent carrying
	 * an "8259-interrupt-acknowledge" property; build the (possibly
	 * 64-bit) intack address from the last one or two address cells.
	 */
	for (old = of_node_get(found); old != NULL ; old = np) {
		np = of_get_parent(old);
		of_node_put(old);
		if (np == NULL)
			break;
		if (strcmp(np->name, "pci") != 0)
			continue;
		addrp = of_get_property(np, "8259-interrupt-acknowledge", NULL);
		if (addrp == NULL)
			continue;
		naddr = of_n_addr_cells(np);
		intack = addrp[naddr-1];
		if (naddr > 1)
			intack |= ((unsigned long)addrp[naddr-2]) << 32;
	}
	if (intack)
		printk(KERN_DEBUG "pic: PCI 8259 intack at 0x%016lx\n", intack);
	i8259_init(found, intack);
	of_node_put(found);
	irq_set_chained_handler(cascade, pseries_8259_cascade);
}

180
/* Interrupt-controller bring-up for pSeries. */
static void __init pseries_init_irq(void)
{
	/* Try using a XIVE if available, otherwise use a XICS */
	/* NOTE(review): xive_spapr_init() appears to return 0 on success,
	 * in which case the XICS/i8259 path is skipped — confirm in xive code. */
	if (!xive_spapr_init()) {
		xics_init();
		pseries_setup_i8259_cascade();
	}
}

189 190 191 192 193 194 195 196 197
/*
 * Ask the hypervisor (H_PERFMON hcall) to enable the performance
 * monitor counters for this partition; the reset mask is left empty.
 */
static void pseries_lpar_enable_pmcs(void)
{
	const unsigned long enable_bits = 1UL << 63;

	plpar_hcall_norets(H_PERFMON, enable_bits, 0UL);
}

198
/*
 * Device-tree reconfiguration notifier: keep the pci_dn bookkeeping in
 * sync when PCI device nodes are hot-added or hot-removed.
 */
static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	struct device_node *parent, *np = rd->dn;
	struct pci_dn *pdn;
	int err = NOTIFY_OK;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		/* Attach: create pci_dn info under the parent's PHB, if any */
		parent = of_get_parent(np);
		pdn = parent ? PCI_DN(parent) : NULL;
		if (pdn)
			pci_add_device_node_info(pdn->phb, np);

		of_node_put(parent);
		break;
	case OF_RECONFIG_DETACH_NODE:
		/* Detach: unlink the node's pci_dn from its PHB list */
		pdn = PCI_DN(np);
		if (pdn)
			list_del(&pdn->list);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block pci_dn_reconfig_nb = {
	.notifier_call = pci_dn_reconfig_notifier,
};

230 231
struct kmem_cache *dtl_cache;

232
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Allocate space for the dispatch trace log for all possible cpus
 * and register the buffers with the hypervisor.  This is used for
 * computing time stolen by the hypervisor.
 */
static int alloc_dispatch_logs(void)
{
	int cpu, ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	/* Only meaningful on shared-processor LPARs */
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	if (!dtl_cache)
		return 0;

	for_each_possible_cpu(cpu) {
		pp = paca_ptrs[cpu];
		dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
		if (!dtl) {
			/* Non-fatal: carry on with whatever was allocated */
			pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
				cpu);
			pr_warn("Stolen time statistics will be unreliable\n");
			break;
		}

		pp->dtl_ridx = 0;
		pp->dispatch_log = dtl;
		pp->dispatch_log_end = dtl + N_DISPATCH_LOG;
		pp->dtl_curr = dtl;
	}

	/* Register the DTL for the current (boot) cpu */
	dtl = get_paca()->dispatch_log;
	get_paca()->dtl_ridx = 0;
	get_paca()->dtl_curr = dtl;
	get_paca()->lppaca_ptr->dtl_idx = 0;

	/* hypervisor reads buffer length from this field */
	dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
	ret = register_dtl(hard_smp_processor_id(), __pa(dtl));
	if (ret)
		pr_err("WARNING: DTL registration of cpu %d (hw %d) failed "
		       "with %d\n", smp_processor_id(),
		       hard_smp_processor_id(), ret);
	/* NOTE(review): mask value 2 presumably enables a subset of DTL
	 * events — confirm against the lppaca dtl_enable_mask definition. */
	get_paca()->lppaca_ptr->dtl_enable_mask = 2;

	return 0;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/* Stub when native virt CPU accounting is disabled. */
static inline int alloc_dispatch_logs(void)
{
	return 0;
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
289

290 291 292 293 294 295 296 297 298 299 300 301
/*
 * Early initcall: create the kmem cache used for per-cpu dispatch trace
 * logs, then allocate and register the logs themselves.
 */
static int alloc_dispatch_log_kmem_cache(void)
{
	dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
						DISPATCH_LOG_BYTES, 0, NULL);
	if (!dtl_cache) {
		/* Non-fatal: boot continues without stolen-time accounting */
		pr_warn("Failed to create dispatch trace log buffer cache\n");
		pr_warn("Stolen time statistics will be unreliable\n");
		return 0;
	}

	return alloc_dispatch_logs();
}
machine_early_initcall(pseries, alloc_dispatch_log_kmem_cache);
303

304
/* Default LPAR idle loop: cede the processor to the hypervisor. */
static void pseries_lpar_idle(void)
{
	/*
	 * Default handler to go into low thread priority and possibly
	 * low power mode by ceding processor to hypervisor
	 */

	/* Indicate to hypervisor that we are idle. */
	get_lppaca()->idle = 1;

	/*
	 * Yield the processor to the hypervisor.  We return if
	 * an external interrupt occurs (which are driven prior
	 * to returning here) or if a prod occurs from another
	 * processor. When returning here, external interrupts
	 * are enabled.
	 */
	cede_processor();

	get_lppaca()->idle = 0;
}

326 327 328 329 330 331 332
/*
 * Enable relocation on during exceptions. This has partition wide scope and
 * may take a while to complete, if it takes longer than one second we will
 * just give up rather than wasting any more time on this - if that turns out
 * to ever be a problem in practice we can move this into a kernel thread to
 * finish off the process later in boot.
 */
void pseries_enable_reloc_on_exc(void)
{
	long rc;
	unsigned int delay, total_delay = 0;

	/* Retry while the hypervisor reports LONG_BUSY, up to ~1 second */
	while (1) {
		rc = enable_reloc_on_exceptions();
		if (!H_IS_LONG_BUSY(rc)) {
			if (rc == H_P2) {
				pr_info("Relocation on exceptions not"
					" supported\n");
			} else if (rc != H_SUCCESS) {
				pr_warn("Unable to enable relocation"
					" on exceptions: %ld\n", rc);
			}
			break;
		}

		delay = get_longbusy_msecs(rc);
		total_delay += delay;
		if (total_delay > 1000) {
			pr_warn("Warning: Giving up waiting to enable "
				"relocation on exceptions (%u msec)!\n",
				total_delay);
			return;
		}

		mdelay(delay);
	}
}
EXPORT_SYMBOL(pseries_enable_reloc_on_exc);
364

365
/*
 * Disable relocation-on-exceptions (partition wide).  Retries for as
 * long as the hypervisor reports LONG_BUSY; warns on final failure.
 */
void pseries_disable_reloc_on_exc(void)
{
	long rc;

	while (1) {
		rc = disable_reloc_on_exceptions();
		if (!H_IS_LONG_BUSY(rc))
			break;
		mdelay(get_longbusy_msecs(rc));
	}
	if (rc != H_SUCCESS)
		pr_warn("Warning: Failed to disable relocation on exceptions: %ld\n",
			rc);
}
EXPORT_SYMBOL(pseries_disable_reloc_on_exc);
380

381
#ifdef CONFIG_KEXEC_CORE
382 383
static void pSeries_machine_kexec(struct kimage *image)
{
384 385
	if (firmware_has_feature(FW_FEATURE_SET_MODE))
		pseries_disable_reloc_on_exc();
386 387 388 389 390

	default_machine_kexec(image);
}
#endif

391
#ifdef __LITTLE_ENDIAN__
392
void pseries_big_endian_exceptions(void)
393 394 395 396 397 398
{
	long rc;

	while (1) {
		rc = enable_big_endian_exceptions();
		if (!H_IS_LONG_BUSY(rc))
399
			break;
400 401
		mdelay(get_longbusy_msecs(rc));
	}
402 403 404 405 406 407 408 409 410 411 412 413 414 415

	/*
	 * At this point it is unlikely panic() will get anything
	 * out to the user, since this is called very late in kexec
	 * but at least this will stop us from continuing on further
	 * and creating an even more difficult to debug situation.
	 *
	 * There is a known problem when kdump'ing, if cpus are offline
	 * the above call will fail. Rather than panicking again, keep
	 * going and hope the kdump kernel is also little endian, which
	 * it usually is.
	 */
	if (rc && !kdump_in_progress())
		panic("Could not enable big endian exceptions");
416 417
}

418
void pseries_little_endian_exceptions(void)
419 420 421 422 423 424
{
	long rc;

	while (1) {
		rc = enable_little_endian_exceptions();
		if (!H_IS_LONG_BUSY(rc))
425
			break;
426 427
		mdelay(get_longbusy_msecs(rc));
	}
428 429 430 431
	if (rc) {
		ppc_md.progress("H_SET_MODE LE exception fail", 0);
		panic("Could not enable little endian exceptions");
	}
432 433 434
}
#endif

435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451
/*
 * Scan the device tree root for PCI host bridges ("pci"/"pciex" nodes),
 * allocate and set up a pci_controller for each.
 */
static void __init find_and_init_phbs(void)
{
	struct device_node *node;
	struct pci_controller *phb;
	struct device_node *root = of_find_node_by_path("/");

	for_each_child_of_node(root, node) {
		if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
					   strcmp(node->type, "pciex") != 0))
			continue;

		phb = pcibios_alloc_controller(node);
		if (!phb)
			continue;
		rtas_setup_phb(phb);
		pci_process_bridge_OF_ranges(phb, node, 0);
		isa_bridge_find_early(phb);
		phb->controller_ops = pseries_pci_controller_ops;
	}

	of_node_put(root);

	/*
	 * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
	 * in chosen.
	 */
	of_pci_check_probe_only();
}

464 465
/*
 * Translate the H_GET_CPU_CHARACTERISTICS result into the kernel's
 * powerpc security feature flags.
 */
static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
{
	/*
	 * The features below are disabled by default, so we instead look to see
	 * if firmware has *enabled* them, and set them if so.
	 */
	if (result->character & H_CPU_CHAR_SPEC_BAR_ORI31)
		security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);

	if (result->character & H_CPU_CHAR_BCCTRL_SERIALISED)
		security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);

	if (result->character & H_CPU_CHAR_L1D_FLUSH_ORI30)
		security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);

	if (result->character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
		security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);

	if (result->character & H_CPU_CHAR_L1D_THREAD_PRIV)
		security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);

	if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
		security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);

	/*
	 * The features below are enabled by default, so we instead look to see
	 * if firmware has *disabled* them, and clear them if so.
	 */
	if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
		security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);

	if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
		security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);

	if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
		security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
}

502
/*
 * Query CPU characteristics from the hypervisor and configure the RFI
 * L1D-flush mitigation and nospec barrier accordingly.  Called at boot
 * and again after partition migration.
 */
void pseries_setup_rfi_flush(void)
{
	struct h_cpu_char_result result;
	enum l1d_flush_type types;
	bool enable;
	long rc;

	/*
	 * Set features to the defaults assumed by init_cpu_char_feature_flags()
	 * so it can set/clear again any features that might have changed after
	 * migration, and in case the hypercall fails and it is not even called.
	 */
	powerpc_security_features = SEC_FTR_DEFAULT;

	rc = plpar_get_cpu_characteristics(&result);
	if (rc == H_SUCCESS)
		init_cpu_char_feature_flags(&result);

	/*
	 * We're the guest so this doesn't apply to us, clear it to simplify
	 * handling of it elsewhere.
	 */
	security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);

	/* Fallback flush is always available; add faster variants if present */
	types = L1D_FLUSH_FALLBACK;

	if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
		types |= L1D_FLUSH_MTTRIG;

	if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
		types |= L1D_FLUSH_ORI;

	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);

	setup_rfi_flush(types, enable);
	setup_barrier_nospec();
}

541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696
#ifdef CONFIG_PCI_IOV
/* Layout of the "ibm,open-sriov-vf-bar-info" property array. */
enum rtas_iov_fw_value_map {
	NUM_RES_PROPERTY  = 0, /* Number of Resources */
	LOW_INT           = 1, /* Lowest 32 bits of Address */
	START_OF_ENTRIES  = 2, /* Always start of entry */
	APERTURE_PROPERTY = 2, /* Start of entry+ to  Aperture Size */
	WDW_SIZE_PROPERTY = 4, /* Start of entry+ to Window Size */
	NEXT_ENTRY        = 7  /* Go to next entry on array */
};

/* Selector for which firmware value pseries_get_iov_fw_value() reads. */
enum get_iov_fw_value_index {
	BAR_ADDRS     = 1,    /*  Get Bar Address */
	APERTURE_SIZE = 2,    /*  Get Aperture Size */
	WDW_SIZE      = 3     /*  Get Window Size */
};

resource_size_t pseries_get_iov_fw_value(struct pci_dev *dev, int resno,
					 enum get_iov_fw_value_index value)
{
	const int *indexes;
	struct device_node *dn = pci_device_to_OF_node(dev);
	int i, num_res, ret = 0;

	indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
	if (!indexes)
		return  0;

	/*
	 * First element in the array is the number of Bars
	 * returned.  Search through the list to find the matching
	 * bar
	 */
	num_res = of_read_number(&indexes[NUM_RES_PROPERTY], 1);
	if (resno >= num_res)
		return 0; /* or an errror */

	i = START_OF_ENTRIES + NEXT_ENTRY * resno;
	switch (value) {
	case BAR_ADDRS:
		ret = of_read_number(&indexes[i], 2);
		break;
	case APERTURE_SIZE:
		ret = of_read_number(&indexes[i + APERTURE_PROPERTY], 2);
		break;
	case WDW_SIZE:
		ret = of_read_number(&indexes[i + WDW_SIZE_PROPERTY], 2);
		break;
	}

	return ret;
}

/*
 * Populate the VF BAR resources of @dev (start/end/flags/name) from the
 * firmware-supplied "ibm,open-sriov-vf-bar-info" array @indexes, using
 * the per-VF aperture size for each BAR.
 */
void of_pci_set_vf_bar_size(struct pci_dev *dev, const int *indexes)
{
	struct resource *res;
	resource_size_t base, size;
	int i, r, num_res;

	num_res = of_read_number(&indexes[NUM_RES_PROPERTY], 1);
	num_res = min_t(int, num_res, PCI_SRIOV_NUM_BARS);
	for (i = START_OF_ENTRIES, r = 0; r < num_res && r < PCI_SRIOV_NUM_BARS;
	     i += NEXT_ENTRY, r++) {
		res = &dev->resource[r + PCI_IOV_RESOURCES];
		base = of_read_number(&indexes[i], 2);
		size = of_read_number(&indexes[i + APERTURE_PROPERTY], 2);
		res->flags = pci_parse_of_flags(of_read_number
						(&indexes[i + LOW_INT], 1), 0);
		/* Firmware owns the assignment: mark 64-bit and fixed */
		res->flags |= (IORESOURCE_MEM_64 | IORESOURCE_PCI_FIXED);
		res->name = pci_name(dev);
		res->start = base;
		res->end = base + size - 1;
	}
}

/*
 * Claim the firmware-assigned VF BAR windows of @dev in iomem_resource.
 * On conflict the resource is flagged IORESOURCE_UNSET so the generic
 * PCI code can reassign it later.
 */
void of_pci_parse_iov_addrs(struct pci_dev *dev, const int *indexes)
{
	struct resource *res, *root, *conflict;
	resource_size_t base, size;
	int i, r, num_res;

	/*
	 * First element in the array is the number of Bars
	 * returned.  Search through the list to find the matching
	 * bars assign them from firmware into resources structure.
	 */
	num_res = of_read_number(&indexes[NUM_RES_PROPERTY], 1);
	for (i = START_OF_ENTRIES, r = 0; r < num_res && r < PCI_SRIOV_NUM_BARS;
	     i += NEXT_ENTRY, r++) {
		res = &dev->resource[r + PCI_IOV_RESOURCES];
		base = of_read_number(&indexes[i], 2);
		size = of_read_number(&indexes[i + WDW_SIZE_PROPERTY], 2);
		res->name = pci_name(dev);
		res->start = base;
		res->end = base + size - 1;
		root = &iomem_resource;
		dev_dbg(&dev->dev,
			"pSeries IOV BAR %d: trying firmware assignment %pR\n",
			 r + PCI_IOV_RESOURCES, res);
		conflict = request_resource_conflict(root, res);
		if (conflict) {
			dev_info(&dev->dev,
				 "BAR %d: %pR conflicts with %s %pR\n",
				 r + PCI_IOV_RESOURCES, res,
				 conflict->name, conflict);
			res->flags |= IORESOURCE_UNSET;
		}
	}
}

/* ppc_md hook: size VF BARs from the device tree for open-SR-IOV devices. */
static void pseries_pci_fixup_resources(struct pci_dev *pdev)
{
	const int *indexes;
	struct device_node *dn = pci_device_to_OF_node(pdev);

	/*Firmware must support open sriov otherwise dont configure*/
	indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
	if (!indexes)
		return;
	/* Assign the addresses from device tree*/
	of_pci_set_vf_bar_size(pdev, indexes);
}

/*
 * ppc_md hook: claim firmware-assigned VF BAR windows.  Runs only for
 * physical functions that have not yet been added to the device lists.
 */
static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
{
	const int *indexes;
	struct device_node *dn = pci_device_to_OF_node(pdev);

	if (!pdev->is_physfn || pdev->is_added)
		return;
	/*Firmware must support open sriov otherwise dont configure*/
	indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
	if (!indexes)
		return;
	/* Assign the addresses from device tree*/
	of_pci_parse_iov_addrs(pdev, indexes);
}

/*
 * ppc_md hook: report VF BAR alignment.  With open-SR-IOV firmware the
 * alignment is the firmware-provided aperture size; otherwise fall back
 * to the generic per-VF size.
 */
static resource_size_t pseries_pci_iov_resource_alignment(struct pci_dev *pdev,
							  int resno)
{
	const __be32 *reg;
	struct device_node *dn = pci_device_to_OF_node(pdev);

	/*Firmware must support open sriov otherwise report regular alignment*/
	reg = of_get_property(dn, "ibm,is-open-sriov-pf", NULL);
	if (!reg)
		return pci_iov_resource_size(pdev, resno);

	if (!pdev->is_physfn)
		return 0;
	return pseries_get_iov_fw_value(pdev,
					resno - PCI_IOV_RESOURCES,
					APERTURE_SIZE);
}
#endif

697 698
/*
 * Main architecture setup for pSeries: security mitigations, FWNMI,
 * PCI host bridge discovery, NVRAM, and LPAR-specific ppc_md hooks.
 */
static void __init pSeries_setup_arch(void)
{
	set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);

	/* Discover PIC type and setup ppc_md accordingly */
	smp_init_pseries();


	/* openpic global configuration register (64-bit format). */
	/* openpic Interrupt Source Unit pointer (64-bit format). */
	/* python0 facility area (mmio) (64-bit format) REAL address. */

	/* init to some ~sane value until calibrate_delay() runs */
	loops_per_jiffy = 50000000;

	fwnmi_init();

	/* Spectre/Meltdown-class mitigations */
	pseries_setup_rfi_flush();
	setup_stf_barrier();

	/* By default, only probe PCI (can be overridden by rtas_pci) */
	pci_add_flags(PCI_PROBE_ONLY);

	/* Find and initialize PCI host bridges */
	init_pci_config_tokens();
	find_and_init_phbs();
	of_reconfig_notifier_register(&pci_dn_reconfig_nb);

	pSeries_nvram_init();

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		vpa_init(boot_cpuid);
		ppc_md.power_save = pseries_lpar_idle;
		ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
#ifdef CONFIG_PCI_IOV
		ppc_md.pcibios_fixup_resources =
			pseries_pci_fixup_resources;
		ppc_md.pcibios_fixup_sriov =
			pseries_pci_fixup_iov_resources;
		ppc_md.pcibios_iov_resource_alignment =
			pseries_pci_iov_resource_alignment;
#endif
	} else {
		/* No special idle routine */
		ppc_md.enable_pmcs = power4_enable_pmcs;
	}

	ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
}

747 748 749 750 751 752
/* Panic hook: flush kernel messages, then ask RTAS to terminate the OS. */
static void pseries_panic(char *str)
{
	panic_flush_kmsg_end();
	rtas_os_term(str);
}

L
Linus Torvalds 已提交
753 754 755
/* Display the kernel identity on the operator panel at boot. */
static int __init pSeries_init_panel(void)
{
	/* Manually leave the kernel version on the panel. */
#ifdef __BIG_ENDIAN__
	ppc_md.progress("Linux ppc64\n", 0);
#else
	ppc_md.progress("Linux ppc64le\n", 0);
#endif
	ppc_md.progress(init_utsname()->version, 0);

	return 0;
}
machine_arch_initcall(pseries, pSeries_init_panel);
L
Linus Torvalds 已提交
766

767
/* Set the Data Address Breakpoint Register via H_SET_DABR (dabrx unused). */
static int pseries_set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return plpar_hcall_norets(H_SET_DABR, dabr);
}

772
/* Set the DABR via the extended H_SET_XDABR hcall, sanitizing dabrx per PAPR. */
static int pseries_set_xdabr(unsigned long dabr, unsigned long dabrx)
{
	/* Have to set at least one bit in the DABRX according to PAPR */
	if (dabrx == 0 && dabr == 0)
		dabrx = DABRX_USER;
	/* PAPR says we can only set kernel and user bits */
	dabrx &= DABRX_KERNEL | DABRX_USER;

	return plpar_hcall_norets(H_SET_XDABR, dabr, dabrx);
}
L
Linus Torvalds 已提交
782

783 784 785 786 787
/* Set the Data Address Watchpoint Register through the hypervisor. */
static int pseries_set_dawr(unsigned long dawr, unsigned long dawrx)
{
	/* PAPR says we can't set HYP */
	dawrx &= ~DAWRX_HYP;

	return  plpar_set_watchpoint0(dawr, dawrx);
}

791 792 793
#define CMO_CHARACTERISTICS_TOKEN 44
#define CMO_MAXLENGTH 1026

794 795 796 797 798 799 800 801 802 803
/*
 * Detect extended Cooperative Memory Overcommit (XCMO) support by
 * probing h_get_mpp_x(), and update the firmware feature flags.
 */
void pSeries_coalesce_init(void)
{
	struct hvcall_mpp_x_data mpp_x_data;

	if (firmware_has_feature(FW_FEATURE_CMO) && !h_get_mpp_x(&mpp_x_data))
		powerpc_firmware_features |= FW_FEATURE_XCMO;
	else
		powerpc_firmware_features &= ~FW_FEATURE_XCMO;
}

804 805 806 807
/**
 * fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions,
 * handle that here. (Stolen from parse_system_parameter_string)
 *
 * Fetches the CMO characteristics system parameter from RTAS and parses
 * its "key=value,key=value" payload in place (inside rtas_data_buf,
 * under rtas_data_buf_lock), setting CMO_PageSize / CMO_PrPSP /
 * CMO_SecPSP and the FW_FEATURE_CMO flag.
 */
static void pSeries_cmo_feature_init(void)
{
	char *ptr, *key, *value, *end;
	int call_status;
	int page_order = IOMMU_PAGE_SHIFT_4K;

	pr_debug(" -> fw_cmo_feature_init()\n");
	spin_lock(&rtas_data_buf_lock);
	memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
	call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
				NULL,
				CMO_CHARACTERISTICS_TOKEN,
				__pa(rtas_data_buf),
				RTAS_DATA_BUF_SIZE);

	if (call_status != 0) {
		spin_unlock(&rtas_data_buf_lock);
		pr_debug("CMO not available\n");
		pr_debug(" <- fw_cmo_feature_init()\n");
		return;
	}

	end = rtas_data_buf + CMO_MAXLENGTH - 2;
	ptr = rtas_data_buf + 2;	/* step over strlen value */
	key = value = ptr;

	while (*ptr && (ptr <= end)) {
		/* Separate the key and value by replacing '=' with '\0' and
		 * point the value at the string after the '='
		 */
		if (ptr[0] == '=') {
			ptr[0] = '\0';
			value = ptr + 1;
		} else if (ptr[0] == '\0' || ptr[0] == ',') {
			/* Terminate the string containing the key/value pair */
			ptr[0] = '\0';

			if (key == value) {
				pr_debug("Malformed key/value pair\n");
				/* Never found a '=', end processing */
				break;
			}

			if (0 == strcmp(key, "CMOPageSize"))
				page_order = simple_strtol(value, NULL, 10);
			else if (0 == strcmp(key, "PrPSP"))
				CMO_PrPSP = simple_strtol(value, NULL, 10);
			else if (0 == strcmp(key, "SecPSP"))
				CMO_SecPSP = simple_strtol(value, NULL, 10);
			value = key = ptr + 1;
		}
		ptr++;
	}

	/* Page size is returned as the power of 2 of the page size,
	 * convert to the page size in bytes before returning
	 */
	CMO_PageSize = 1 << page_order;
	pr_debug("CMO_PageSize = %lu\n", CMO_PageSize);

	if (CMO_PrPSP != -1 || CMO_SecPSP != -1) {
		pr_info("CMO enabled\n");
		pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
		         CMO_SecPSP);
		powerpc_firmware_features |= FW_FEATURE_CMO;
		pSeries_coalesce_init();
	} else
		pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
		         CMO_SecPSP);
	spin_unlock(&rtas_data_buf_lock);
	pr_debug(" <- fw_cmo_feature_init()\n");
}

L
Linus Torvalds 已提交
881 882 883
/*
 * Early initialization.  Relocation is on but do not reference unbolted pages
 *
 * Sets up the early hypervisor console, breakpoint-register hooks
 * (DABR/XDABR/DAWR depending on firmware features), CMO and early IOMMU.
 */
static void __init pseries_init(void)
{
	pr_debug(" -> pseries_init()\n");

#ifdef CONFIG_HVC_CONSOLE
	if (firmware_has_feature(FW_FEATURE_LPAR))
		hvc_vio_init_early();
#endif
	/* Prefer the extended XDABR interface when available */
	if (firmware_has_feature(FW_FEATURE_XDABR))
		ppc_md.set_dabr = pseries_set_xdabr;
	else if (firmware_has_feature(FW_FEATURE_DABR))
		ppc_md.set_dabr = pseries_set_dabr;

	if (firmware_has_feature(FW_FEATURE_SET_MODE))
		ppc_md.set_dawr = pseries_set_dawr;

	pSeries_cmo_feature_init();
	iommu_init_early_pSeries();

	pr_debug(" <- pseries_init()\n");
}

906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933
/**
 * pseries_power_off - tell firmware about how to power off the system.
 *
 * This function calls either the power-off rtas token in normal cases
 * or the ibm,power-off-ups token (if present & requested) in case of
 * a power failure. If power-off token is used, power on will only be
 * possible with power button press. If ibm,power-off-ups token is used
 * it will allow auto poweron after power is restored.
 */
static void pseries_power_off(void)
{
	int rc;
	int rtas_poweroff_ups_token = rtas_token("ibm,power-off-ups");

	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_POWER_OFF);

	if (rtas_poweron_auto == 0 ||
		rtas_poweroff_ups_token == RTAS_UNKNOWN_SERVICE) {
		rc = rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1);
		printk(KERN_INFO "RTAS power-off returned %d\n", rc);
	} else {
		rc = rtas_call(rtas_poweroff_ups_token, 0, 1, NULL);
		printk(KERN_INFO "RTAS ibm,power-off-ups returned %d\n", rc);
	}
	/* Spin forever: RTAS should have powered us off by now */
	for (;;);
}

934 935
/*
 * Platform probe: accept machines whose root "device_type" is "chrp",
 * excluding Cell blades which misreport themselves.  Returns 1 when
 * this platform matches, 0 otherwise.
 */
static int __init pSeries_probe(void)
{
	const char *dtype = of_get_property(of_root, "device_type", NULL);

 	if (dtype == NULL)
 		return 0;
 	if (strcmp(dtype, "chrp"))
		return 0;

	/* Cell blades firmware claims to be chrp while it's not. Until this
	 * is fixed, we need to avoid those here.
	 */
	if (of_machine_is_compatible("IBM,CPBW-1.0") ||
	    of_machine_is_compatible("IBM,CBEA"))
		return 0;

	pm_power_off = pseries_power_off;

	pr_debug("Machine is%s LPAR !\n",
	         (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");

	pseries_init();

	return 1;
}

960 961
static int pSeries_pci_probe_mode(struct pci_bus *bus)
{
962
	if (firmware_has_feature(FW_FEATURE_LPAR))
963 964 965 966
		return PCI_PROBE_DEVTREE;
	return PCI_PROBE_NORMAL;
}

967 968 969 970
/* PHB callbacks installed on every pSeries PCI host bridge. */
struct pci_controller_ops pseries_pci_controller_ops = {
	.probe_mode		= pSeries_pci_probe_mode,
};

971 972
/* Machine description tying all pSeries platform callbacks together. */
define_machine(pseries) {
	.name			= "pSeries",
	.probe			= pSeries_probe,
	.setup_arch		= pSeries_setup_arch,
	.init_IRQ		= pseries_init_irq,
	.show_cpuinfo		= pSeries_show_cpuinfo,
	.log_error		= pSeries_log_error,
	.pcibios_fixup		= pSeries_final_fixup,
	.restart		= rtas_restart,
	.halt			= rtas_halt,
	.panic			= pseries_panic,
	.get_boot_time		= rtas_get_boot_time,
	.get_rtc_time		= rtas_get_rtc_time,
	.set_rtc_time		= rtas_set_rtc_time,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= rtas_progress,
	.system_reset_exception = pSeries_system_reset_exception,
	.machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC_CORE
	.machine_kexec          = pSeries_machine_kexec,
	.kexec_cpu_down         = pseries_kexec_cpu_down,
#endif
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
	.memory_block_size	= pseries_memory_block_size,
#endif
};