/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/crash_dump.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/firmware.h>

#include "powernv.h"
#include "pci.h"

static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
			    const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char pfix[32];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (pe->pdev)
		strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
	else
		sprintf(pfix, "%04x:%02x     ",
			pci_domain_nr(pe->pbus), pe->pbus->number);

	printk("%spci %s: [PE# %.3d] %pV",
	       level, pfix, pe->pe_number, &vaf);

	va_end(args);
}

#define pe_err(pe, fmt, ...)					\
	pe_level_printk(pe, KERN_ERR, fmt, ##__VA_ARGS__)
#define pe_warn(pe, fmt, ...)					\
	pe_level_printk(pe, KERN_WARNING, fmt, ##__VA_ARGS__)
#define pe_info(pe, fmt, ...)					\
	pe_level_printk(pe, KERN_INFO, fmt, ##__VA_ARGS__)

/*
 * stdcix is only supposed to be used in hypervisor real mode as per
 * the architecture spec
 */
static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
{
	__asm__ __volatile__("stdcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}
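
/*
 * Illustrative note: stdcix is a cache-inhibited store, so the value
 * goes straight to the MMIO register without involving the cache,
 * which is what hypervisor real mode requires; see the rm (real mode)
 * branches of the TCE invalidate helpers below for its only callers.
 */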

static inline bool pnv_pci_is_mem_pref_64(unsigned long flags)
{
	return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) ==
		(IORESOURCE_MEM_64 | IORESOURCE_PREFETCH));
}

static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
	unsigned long pe;

	do {
		pe = find_next_zero_bit(phb->ioda.pe_alloc,
					phb->ioda.total_pe, 0);
		if (pe >= phb->ioda.total_pe)
			return IODA_INVALID_PE;
	} while (test_and_set_bit(pe, phb->ioda.pe_alloc));

	phb->ioda.pe_array[pe].phb = phb;
	phb->ioda.pe_array[pe].pe_number = pe;
	return pe;
}
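
/*
 * Illustrative note: the loop above is a lock-free allocator.
 * find_next_zero_bit() may race with a concurrent caller, but
 * test_and_set_bit() is atomic, so if two callers pick the same PE#
 * only one wins and the loser simply rescans the bitmap.
 */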

static void pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
{
	WARN_ON(phb->ioda.pe_array[pe].pdev);

	memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe));
	clear_bit(pe, phb->ioda.pe_alloc);
}

/* The default M64 BAR is shared by all PEs */
static int pnv_ioda2_init_m64(struct pnv_phb *phb)
{
	const char *desc;
	struct resource *r;
	s64 rc;

	/* Configure the default M64 BAR */
	rc = opal_pci_set_phb_mem_window(phb->opal_id,
					 OPAL_M64_WINDOW_TYPE,
					 phb->ioda.m64_bar_idx,
					 phb->ioda.m64_base,
					 0, /* unused */
					 phb->ioda.m64_size);
	if (rc != OPAL_SUCCESS) {
		desc = "configuring";
		goto fail;
	}

	/* Enable the default M64 BAR */
	rc = opal_pci_phb_mmio_enable(phb->opal_id,
				      OPAL_M64_WINDOW_TYPE,
				      phb->ioda.m64_bar_idx,
				      OPAL_ENABLE_M64_SPLIT);
	if (rc != OPAL_SUCCESS) {
		desc = "enabling";
		goto fail;
	}

	/* Mark the M64 BAR assigned */
	set_bit(phb->ioda.m64_bar_idx, &phb->ioda.m64_bar_alloc);

	/*
	 * Strip off the segment used by the reserved PE, which is
	 * expected to be 0 or the last one of the PE capacity.
	 */
	r = &phb->hose->mem_resources[1];
	if (phb->ioda.reserved_pe == 0)
		r->start += phb->ioda.m64_segsize;
	else if (phb->ioda.reserved_pe == (phb->ioda.total_pe - 1))
		r->end -= phb->ioda.m64_segsize;
	else
		pr_warn("  Cannot strip M64 segment for reserved PE#%d\n",
			phb->ioda.reserved_pe);

	return 0;

fail:
	pr_warn("  Failure %lld %s M64 BAR#%d\n",
		rc, desc, phb->ioda.m64_bar_idx);
	opal_pci_phb_mmio_enable(phb->opal_id,
				 OPAL_M64_WINDOW_TYPE,
				 phb->ioda.m64_bar_idx,
				 OPAL_DISABLE_M64);
	return -EIO;
}
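
/*
 * Worked example (assumed numbers): with total_pe = 256 and a 64GB
 * M64 window, m64_segsize is 256MB and segment N of the shared BAR
 * belongs to PE# N. With reserved_pe = 0, the first 256MB segment is
 * stripped from mem_resources[1] above so nothing gets assigned there.
 */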

static void pnv_ioda2_alloc_m64_pe(struct pnv_phb *phb)
{
	resource_size_t sgsz = phb->ioda.m64_segsize;
	struct pci_dev *pdev;
	struct resource *r;
	int base, step, i;

	/*
	 * The root bus always has the full M64 range while a root
	 * port only has the M64 range used in reality. So we're
	 * checking the root ports instead of the root bus.
	 */
	list_for_each_entry(pdev, &phb->hose->bus->devices, bus_list) {
		for (i = PCI_BRIDGE_RESOURCES;
		     i <= PCI_BRIDGE_RESOURCE_END; i++) {
			r = &pdev->resource[i];
			if (!r->parent ||
			    !pnv_pci_is_mem_pref_64(r->flags))
				continue;

			base = (r->start - phb->ioda.m64_base) / sgsz;
			for (step = 0; step < resource_size(r) / sgsz; step++)
				set_bit(base + step, phb->ioda.pe_alloc);
		}
	}
}
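
/*
 * Worked example (assumed numbers): if m64_base is 0x3fc000000000,
 * m64_segsize is 256MB and a root port window spans
 * 0x3fc040000000..0x3fc07fffffff, then base = 4 and bits 4..7 are
 * pre-set in pe_alloc, so pnv_ioda_alloc_pe() never hands out PE# 4..7.
 */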

static int pnv_ioda2_pick_m64_pe(struct pnv_phb *phb,
				 struct pci_bus *bus, int all)
{
	resource_size_t segsz = phb->ioda.m64_segsize;
	struct pci_dev *pdev;
	struct resource *r;
	struct pnv_ioda_pe *master_pe, *pe;
	unsigned long size, *pe_alloc;
	bool found;
	int start, i, j;

	/* Root bus shouldn't use M64 */
	if (pci_is_root_bus(bus))
		return IODA_INVALID_PE;

	/* We support only one M64 window on each bus */
	found = false;
	pci_bus_for_each_resource(bus, r, i) {
		if (r && r->parent &&
		    pnv_pci_is_mem_pref_64(r->flags)) {
			found = true;
			break;
		}
	}

	/* No M64 window found ? */
	if (!found)
		return IODA_INVALID_PE;

	/* Allocate bitmap */
	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
	pe_alloc = kzalloc(size, GFP_KERNEL);
	if (!pe_alloc) {
		pr_warn("%s: Out of memory !\n",
			__func__);
		return IODA_INVALID_PE;
	}

	/*
	 * Figure out the reserved PE numbers covered by the PE
	 * and its child PEs.
	 */
	start = (r->start - phb->ioda.m64_base) / segsz;
	for (i = 0; i < resource_size(r) / segsz; i++)
		set_bit(start + i, pe_alloc);

	if (all)
		goto done;

	/*
	 * If the PE doesn't cover all subordinate buses, we need
	 * to subtract the segments consumed by the children from
	 * the reserved PEs.
	 */
	list_for_each_entry(pdev, &bus->devices, bus_list) {
		if (!pdev->subordinate)
			continue;

		pci_bus_for_each_resource(pdev->subordinate, r, i) {
			if (!r || !r->parent ||
			    !pnv_pci_is_mem_pref_64(r->flags))
				continue;

			start = (r->start - phb->ioda.m64_base) / segsz;
			for (j = 0; j < resource_size(r) / segsz ; j++)
				clear_bit(start + j, pe_alloc);
		}
	}

	/*
	 * The current bus might not own the M64 window and it could
	 * be entirely contributed by its child buses. In that case,
	 * we needn't pick an M64 dependent PE#.
	 */
	if (bitmap_empty(pe_alloc, phb->ioda.total_pe)) {
		kfree(pe_alloc);
		return IODA_INVALID_PE;
	}

	/*
	 * Figure out the master PE and put all slave PEs to master
	 * PE's list to form compound PE.
	 */
done:
	master_pe = NULL;
	i = -1;
	while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe, i + 1)) <
		phb->ioda.total_pe) {
		pe = &phb->ioda.pe_array[i];
		pe->phb = phb;
		pe->pe_number = i;

		if (!master_pe) {
			pe->flags |= PNV_IODA_PE_MASTER;
			INIT_LIST_HEAD(&pe->slaves);
			master_pe = pe;
		} else {
			pe->flags |= PNV_IODA_PE_SLAVE;
			pe->master = master_pe;
			list_add_tail(&pe->list, &master_pe->slaves);
		}
	}

	kfree(pe_alloc);
	return master_pe->pe_number;
}
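
/*
 * Illustrative note: a "compound" PE is the group of M64 segments one
 * bridge window spans. The first segment found becomes the master PE
 * and the rest are chained onto master_pe->slaves, which lets the
 * freeze/unfreeze/state helpers below treat the group as one unit.
 */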

static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	struct device_node *dn = hose->dn;
	struct resource *res;
	const u32 *r;
	u64 pci_addr;

	if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
		pr_info("  Firmware too old to support M64 window\n");
		return;
	}

	r = of_get_property(dn, "ibm,opal-m64-window", NULL);
	if (!r) {
		pr_info("  No <ibm,opal-m64-window> on %s\n",
			dn->full_name);
		return;
	}

	/* FIXME: Support M64 for P7IOC */
	if (phb->type != PNV_PHB_IODA2) {
		pr_info("  M64 window unsupported on this PHB type\n");
		return;
	}

	res = &hose->mem_resources[1];
	res->start = of_translate_address(dn, r + 2);
	res->end = res->start + of_read_number(r + 4, 2) - 1;
	res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
	pci_addr = of_read_number(r, 2);
	hose->mem_offset[1] = res->start - pci_addr;

	phb->ioda.m64_size = resource_size(res);
	phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe;
	phb->ioda.m64_base = pci_addr;

	/* Use last M64 BAR to cover M64 window */
	phb->ioda.m64_bar_idx = 15;
	phb->init_m64 = pnv_ioda2_init_m64;
	phb->alloc_m64_pe = pnv_ioda2_alloc_m64_pe;
	phb->pick_m64_pe = pnv_ioda2_pick_m64_pe;
}
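
/*
 * Layout sketch (assumed from the parsing above): "ibm,opal-m64-window"
 * is three 64-bit fields encoded as 2 cells each -- <pci-addr
 * parent-addr size> -- so r[0..1] is the PCI address, r[2..3] is the
 * parent address passed to of_translate_address(), and r[4..5] is the
 * window size.
 */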

static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no];
	struct pnv_ioda_pe *slave;
	s64 rc;

	/* Fetch master PE */
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Freeze master PE */
	rc = opal_pci_eeh_freeze_set(phb->opal_id,
				     pe_no,
				     OPAL_EEH_ACTION_SET_FREEZE_ALL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
			__func__, rc, phb->hose->global_number, pe_no);
		return;
	}

	/* Freeze slave PEs */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_set(phb->opal_id,
					     slave->pe_number,
					     OPAL_EEH_ACTION_SET_FREEZE_ALL);
		if (rc != OPAL_SUCCESS)
			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				slave->pe_number);
	}
}

static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
{
	struct pnv_ioda_pe *pe, *slave;
	s64 rc;

	/* Find master PE */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Clear frozen state for master PE */
	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, opt);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld clearing option %d on PHB#%x-PE#%x\n",
			__func__, rc, opt, phb->hose->global_number, pe_no);
		return -EIO;
	}

	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Clear frozen state for slave PEs */
	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					     slave->pe_number,
					     opt);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld clearing option %d on PHB#%x-PE#%x\n",
				__func__, rc, opt, phb->hose->global_number,
				slave->pe_number);
			return -EIO;
		}
	}

	return 0;
}

static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *slave, *pe;
	u8 fstate, state;
	__be16 pcierr;
	s64 rc;

	/* Sanity check on PE number */
	if (pe_no < 0 || pe_no >= phb->ioda.total_pe)
		return OPAL_EEH_STOPPED_PERM_UNAVAIL;

	/*
	 * Fetch the master PE; note that the PE instance might
	 * not be initialized yet.
	 */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Check the master PE */
	rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
					&state, &pcierr, NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting "
			"PHB#%x-PE#%x state\n",
			__func__, rc,
			phb->hose->global_number, pe_no);
		return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
	}

	/* Check the slave PE */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return state;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						slave->pe_number,
						&fstate,
						&pcierr,
						NULL);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting "
				"PHB#%x-PE#%x state\n",
				__func__, rc,
				phb->hose->global_number, slave->pe_number);
			return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
		}

		/*
		 * Override the result based on ascending priority:
		 * the more severe (higher) state wins.
		 */
		if (fstate > state)
			state = fstate;
	}

	return state;
}

/* Currently those 2 are only used when MSIs are enabled, this will change
 * but in the meantime, we need to protect them to avoid warnings
 */
#ifdef CONFIG_PCI_MSI
static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);

	if (!pdn)
		return NULL;
	if (pdn->pe_number == IODA_INVALID_PE)
		return NULL;
	return &phb->ioda.pe_array[pdn->pe_number];
}
#endif /* CONFIG_PCI_MSI */

static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch(count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			pr_err("%s: Number of subordinate busses %d"
			       " unsupported\n",
			       pci_name(pe->pbus->self), count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
		parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/*
	 * Associate PE in PELT. We need to add the PE into the
	 * corresponding PELT-V as well. Otherwise, the errors
	 * originating from the PE might contribute to other
	 * PEs.
	 */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
	if (rc) {
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
		return -ENXIO;
	}

	rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
				pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
	if (rc)
		pe_warn(pe, "OPAL error %ld adding self to PELTV\n", rc);
	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Add to all parents PELT-V */
	while (parent) {
		struct pci_dn *pdn = pci_get_pdn(parent);
		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
						pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
			/* XXX What to do in case of error ? */
		}
		parent = parent->bus->self;
	}
	/* Setup reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Setup one MVT per PE on IODA1 */
	if (phb->type == PNV_PHB_IODA1) {
		pe->mve_number = pe->pe_number;
		rc = opal_pci_set_mve(phb->opal_id, pe->mve_number,
				      pe->pe_number);
		if (rc) {
			pe_err(pe, "OPAL error %ld setting up MVE %d\n",
			       rc, pe->mve_number);
			pe->mve_number = -1;
		} else {
			rc = opal_pci_set_mve_enable(phb->opal_id,
						     pe->mve_number, OPAL_ENABLE_MVE);
			if (rc) {
				pe_err(pe, "OPAL error %ld enabling MVE %d\n",
				       rc, pe->mve_number);
				pe->mve_number = -1;
			}
		}
	} else if (phb->type == PNV_PHB_IODA2)
		pe->mve_number = 0;

	return 0;
}
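
/*
 * Worked example (assumed numbers): for a PE spanning buses
 * 0x10..0x13, count = 4 selects bcomp = OpalPciBus6Bits, i.e. only the
 * top 6 bits of the bus number are compared, so RIDs 0x1000..0x13ff
 * all match the PE and the reverse map loop covers that whole range.
 */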

static void pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct pnv_ioda_pe *lpe;

	list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) {
		if (lpe->dma_weight < pe->dma_weight) {
			list_add_tail(&pe->dma_link, &lpe->dma_link);
			return;
		}
	}
	list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list);
}

static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
{
	/* This is quite simplistic. The "base" weight of a device
	 * is 10. 0 means no DMA is to be accounted for it.
	 */

	/* If it's a bridge, no DMA */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return 0;

	/* Reduce the weight of slow USB controllers */
	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		return 3;

	/* Increase the weight of RAID (includes Obsidian) */
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
		return 15;

	/* Default */
	return 10;
}

#if 0
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct pnv_ioda_pe *pe;
	int pe_num;

	if (!pdn) {
		pr_err("%s: Device tree node not associated properly\n",
			   pci_name(dev));
		return NULL;
	}
	if (pdn->pe_number != IODA_INVALID_PE)
		return NULL;

	/* PE#0 has been pre-set */
	if (dev->bus->number == 0)
		pe_num = 0;
	else
		pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available, disabling device\n",
			   pci_name(dev));
		return NULL;
	}

	/* NOTE: We get only one ref to the pci_dev for the pdn, not for the
	 * pointer in the PE data structure, both should be destroyed at the
	 * same time. However, this needs to be looked at more closely again
	 * once we actually start removing things (Hotplug, SR-IOV, ...)
	 *
	 * At some point we want to remove the PDN completely anyways
	 */
	pe = &phb->ioda.pe_array[pe_num];
	pci_dev_get(dev);
	pdn->pcidev = dev;
	pdn->pe_number = pe_num;
	pe->pdev = dev;
	pe->pbus = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = dev->bus->number << 8 | pdn->devfn;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pdn->pe_number = IODA_INVALID_PE;
		pe->pdev = NULL;
		pci_dev_put(dev);
		return NULL;
	}

	/* Assign a DMA weight to the device */
	pe->dma_weight = pnv_ioda_dma_weight(dev);
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);

	return pe;
}
#endif /* Useful for SRIOV case */

static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_dn *pdn = pci_get_pdn(dev);

		if (pdn == NULL) {
			pr_warn("%s: No device node associated with device !\n",
				pci_name(dev));
			continue;
		}
		pdn->pcidev = dev;
		pdn->pe_number = pe->pe_number;
		pe->dma_weight += pnv_ioda_dma_weight(dev);
		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_same_PE(dev->subordinate, pe);
	}
}

/*
 * There are 2 types of PCI bus sensitive PEs: One that is comprised of
 * a single PCI bus. Another one that contains the primary PCI bus and
 * its subordinate PCI devices and buses. The second type of PE is
 * normally originated by a PCIe-to-PCI bridge or a PLX switch
 * downstream port.
 */
static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;
	int pe_num = IODA_INVALID_PE;

	/* Check if PE is determined by M64 */
	if (phb->pick_m64_pe)
		pe_num = phb->pick_m64_pe(phb, bus, all);

	/* The PE number isn't pinned by M64 */
	if (pe_num == IODA_INVALID_PE)
		pe_num = pnv_ioda_alloc_pe(phb);

	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			__func__, pci_domain_nr(bus), bus->number);
		return;
	}

	pe = &phb->ioda.pe_array[pe_num];
	pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->pbus = bus;
	pe->pdev = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = bus->busn_res.start << 8;
	pe->dma_weight = 0;

	if (all)
		pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
			bus->busn_res.start, bus->busn_res.end, pe_num);
	else
		pe_info(pe, "Secondary bus %d associated with PE#%d\n",
			bus->busn_res.start, pe_num);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pe->pbus = NULL;
		return;
	}

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

	/* Put PE to the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	/* Account for one DMA PE if at least one DMA capable device
	 * exists below the bridge
	 */
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);
}

static void pnv_ioda_setup_PEs(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pnv_ioda_setup_bus_PE(bus, 0);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->subordinate) {
			if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
				pnv_ioda_setup_bus_PE(dev->subordinate, 1);
			else
				pnv_ioda_setup_PEs(dev->subordinate);
		}
	}
}

/*
 * Configure PEs so that the downstream PCI buses and devices
 * could have their associated PE#. Unfortunately, we haven't
 * figured out a way to identify PLX bridges yet. So we simply
 * put the PCI bus and the subordinate buses behind the root
 * port into one PE# here. The rule is expected to change as
 * soon as we can detect PLX bridges correctly.
 */
static void pnv_pci_ioda_setup_PEs(void)
{
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;

		/* M64 layout might affect PE allocation */
		if (phb->alloc_m64_pe)
			phb->alloc_m64_pe(phb);

		pnv_ioda_setup_PEs(hose->bus);
	}
}

static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	/*
	 * The function can be called while the PE#
	 * hasn't been assigned yet. Do nothing in
	 * that case.
	 */
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
	set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
}

static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
				     struct pci_dev *pdev, u64 dma_mask)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;
	uint64_t top;
	bool bypass = false;

	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return -ENODEV;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	if (pe->tce_bypass_enabled) {
		top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
		bypass = (dma_mask >= top);
	}

	if (bypass) {
		dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
		set_dma_ops(&pdev->dev, &dma_direct_ops);
		set_dma_offset(&pdev->dev, pe->tce_bypass_base);
	} else {
		dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
		set_dma_ops(&pdev->dev, &dma_iommu_ops);
		set_iommu_table_base(&pdev->dev, &pe->tce32_table);
	}
	*pdev->dev.dma_mask = dma_mask;
	return 0;
}
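
/*
 * Illustrative note: with tce_bypass_base = 1ull << 59, "top" exceeds
 * any 32-bit mask, so only a device advertising a full 64-bit DMA mask
 * (e.g. DMA_BIT_MASK(64)) takes the direct ops with the bypass offset;
 * everything else stays on the 32-bit TCE table via dma_iommu_ops.
 */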

static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
				   struct pci_bus *bus,
				   bool add_to_iommu_group)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (add_to_iommu_group)
			set_iommu_table_base_and_group(&dev->dev,
						       &pe->tce32_table);
		else
			set_iommu_table_base(&dev->dev, &pe->tce32_table);

		if (dev->subordinate)
			pnv_ioda_setup_bus_dma(pe, dev->subordinate,
					       add_to_iommu_group);
	}
}

static void pnv_pci_ioda1_tce_invalidate(struct pnv_ioda_pe *pe,
					 struct iommu_table *tbl,
					 __be64 *startp, __be64 *endp, bool rm)
{
	__be64 __iomem *invalidate = rm ?
		(__be64 __iomem *)pe->tce_inval_reg_phys :
		(__be64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;
	const unsigned shift = tbl->it_page_shift;

	start = __pa(startp);
	end = __pa(endp);

	/* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
	if (tbl->it_busno) {
		start <<= shift;
		end <<= shift;
		inc = 128ull << shift;
		start |= tbl->it_busno;
		end |= tbl->it_busno;
	} else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
		/* p7ioc-style invalidation, 2 TCEs per write */
		start |= (1ull << 63);
		end |= (1ull << 63);
		inc = 16;
	} else {
		/* Default (older HW) */
		inc = 128;
	}

	end |= inc - 1;	/* round up end to be different than start */

	mb(); /* Ensure above stores are visible */
	while (start <= end) {
		if (rm)
			__raw_rm_writeq(cpu_to_be64(start), invalidate);
		else
			__raw_writeq(cpu_to_be64(start), invalidate);
		start += inc;
	}

	/*
	 * The iommu layer will do another mb() for us on build()
	 * and we don't care on free()
	 */
}

static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
					 struct iommu_table *tbl,
					 __be64 *startp, __be64 *endp, bool rm)
{
	unsigned long start, end, inc;
	__be64 __iomem *invalidate = rm ?
		(__be64 __iomem *)pe->tce_inval_reg_phys :
		(__be64 __iomem *)tbl->it_index;
	const unsigned shift = tbl->it_page_shift;

	/* We'll invalidate DMA address in PE scope */
	start = 0x2ull << 60;
	start |= (pe->pe_number & 0xFF);
	end = start;

	/* Figure out the start, end and step */
	inc = tbl->it_offset + (((u64)startp - tbl->it_base) / sizeof(u64));
	start |= (inc << shift);
	inc = tbl->it_offset + (((u64)endp - tbl->it_base) / sizeof(u64));
	end |= (inc << shift);
	inc = (0x1ull << shift);
	mb();

	while (start <= end) {
		if (rm)
			__raw_rm_writeq(cpu_to_be64(start), invalidate);
		else
			__raw_writeq(cpu_to_be64(start), invalidate);
		start += inc;
	}
}

void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
				 __be64 *startp, __be64 *endp, bool rm)
{
	struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
					      tce32_table);
	struct pnv_phb *phb = pe->phb;

	if (phb->type == PNV_PHB_IODA1)
		pnv_pci_ioda1_tce_invalidate(pe, tbl, startp, endp, rm);
	else
		pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp, rm);
}

static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
				      struct pnv_ioda_pe *pe, unsigned int base,
				      unsigned int segs)
{

	struct page *tce_mem = NULL;
	const __be64 *swinvp;
	struct iommu_table *tbl;
	unsigned int i;
	int64_t rc;
	void *addr;

	/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
#define TCE32_TABLE_SIZE	((0x10000000 / 0x1000) * 8)

	/* XXX FIXME: Handle 64-bit only DMA devices */
	/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
	/* XXX FIXME: Allocate multi-level tables on PHB3 */

	/* We shouldn't already have a 32-bit DMA associated */
	if (WARN_ON(pe->tce32_seg >= 0))
		return;

	/* Grab a 32-bit TCE table */
	pe->tce32_seg = base;
	pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
		(base << 28), ((base + segs) << 28) - 1);

	/* XXX Currently, we allocate one big contiguous table for the
	 * TCEs. We only really need one chunk per 256M of TCE space
	 * (ie per segment) but that's an optimization for later, it
	 * requires some added smarts with our get/put_tce implementation
	 */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(TCE32_TABLE_SIZE * segs));
	if (!tce_mem) {
		pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, TCE32_TABLE_SIZE * segs);

	/* Configure HW */
	for (i = 0; i < segs; i++) {
		rc = opal_pci_map_pe_dma_window(phb->opal_id,
					      pe->pe_number,
					      base + i, 1,
					      __pa(addr) + TCE32_TABLE_SIZE * i,
					      TCE32_TABLE_SIZE, 0x1000);
		if (rc) {
			pe_err(pe, " Failed to configure 32-bit TCE table,"
			       " err %ld\n", rc);
			goto fail;
		}
	}

	/* Setup linux iommu table */
	tbl = &pe->tce32_table;
	pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
				  base << 28, IOMMU_PAGE_SHIFT_4K);

	/* OPAL variant of P7IOC SW invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
	if (swinvp) {
		/* We need a couple more fields -- an address and a data
		 * to or.  Since the bus is only printed out on table free
		 * errors, and on the first pass the data will be a relative
		 * bus number, print that out instead.
		 */
		pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
		tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
				8);
		tbl->it_type |= (TCE_PCI_SWINV_CREATE |
				 TCE_PCI_SWINV_FREE   |
				 TCE_PCI_SWINV_PAIR);
	}
	iommu_init_table(tbl, phb->hose->node);
	iommu_register_group(tbl, phb->hose->global_number, pe->pe_number);

	if (pe->pdev)
		set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
	else
		pnv_ioda_setup_bus_dma(pe, pe->pbus, true);

	return;
 fail:
	/* XXX Failure: Try to fallback to 64-bit only ? */
	if (pe->tce32_seg >= 0)
		pe->tce32_seg = -1;
	if (tce_mem)
		__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
}
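
/*
 * Sizing note (derived from the macro above): TCE32_TABLE_SIZE is
 * (0x10000000 / 0x1000) * 8 = 512KB per 256MB segment, so a PE handed
 * 4 segments gets a 2MB contiguous table covering DMA addresses
 * (base << 28) .. ((base + 4) << 28) - 1.
 */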

static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
{
	struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
					      tce32_table);
	uint16_t window_id = (pe->pe_number << 1) + 1;
	int64_t rc;

	pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
	if (enable) {
		phys_addr_t top = memblock_end_of_DRAM();

		top = roundup_pow_of_two(top);
		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
						     pe->pe_number,
						     window_id,
						     pe->tce_bypass_base,
						     top);
	} else {
		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
						     pe->pe_number,
						     window_id,
						     pe->tce_bypass_base,
						     0);

		/*
		 * EEH needs the mapping between IOMMU table and group
		 * of those VFIO/KVM pass-through devices. We can postpone
		 * resetting DMA ops until the DMA mask is configured in
		 * host side.
		 */
		if (pe->pdev)
			set_iommu_table_base(&pe->pdev->dev, tbl);
		else
			pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
	}
	if (rc)
		pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
	else
		pe->tce_bypass_enabled = enable;
}

static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
					  struct pnv_ioda_pe *pe)
{
	/* TVE #1 is selected by PCI address bit 59 */
	pe->tce_bypass_base = 1ull << 59;

	/* Install set_bypass callback for VFIO */
	pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass;

	/* Enable bypass by default */
	pnv_pci_ioda2_set_bypass(&pe->tce32_table, true);
}
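
/*
 * Illustrative note: since TVE #1 is selected by PCI address bit 59, a
 * device DMA to bus address (1ull << 59) + P goes through the bypass
 * window and hits host physical address P directly, while addresses
 * below bit 59 keep translating through the 32-bit TCE table (TVE #0).
 */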

static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct page *tce_mem = NULL;
	void *addr;
	const __be64 *swinvp;
	struct iommu_table *tbl;
	unsigned int tce_table_size, end;
	int64_t rc;

	/* We shouldn't already have a 32-bit DMA associated */
	if (WARN_ON(pe->tce32_seg >= 0))
		return;

	/* The PE will reserve all possible 32-bits space */
	pe->tce32_seg = 0;
	end = (1 << ilog2(phb->ioda.m32_pci_base));
	tce_table_size = (end / 0x1000) * 8;
	pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
		end);

	/* Allocate TCE table */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(tce_table_size));
	if (!tce_mem) {
		pe_err(pe, "Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, tce_table_size);

	/*
	 * Map TCE table through TVT. The TVE index is the PE number
	 * shifted by 1 bit for 32-bits DMA space.
	 */
	rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
					pe->pe_number << 1, 1, __pa(addr),
					tce_table_size, 0x1000);
	if (rc) {
		pe_err(pe, "Failed to configure 32-bit TCE table,"
		       " err %ld\n", rc);
		goto fail;
	}

	/* Setup linux iommu table */
	tbl = &pe->tce32_table;
	pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0,
			IOMMU_PAGE_SHIFT_4K);

	/* OPAL variant of PHB3 invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
	if (swinvp) {
		/* We need a couple more fields -- an address and a data
		 * to or.  Since the bus is only printed out on table free
		 * errors, and on the first pass the data will be a relative
		 * bus number, print that out instead.
		 */
		pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
		tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
				8);
		tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);
	}
	iommu_init_table(tbl, phb->hose->node);
	iommu_register_group(tbl, phb->hose->global_number, pe->pe_number);

	if (pe->pdev)
		set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
	else
		pnv_ioda_setup_bus_dma(pe, pe->pbus, true);

	/* Also create a bypass window */
	pnv_pci_ioda2_setup_bypass_pe(phb, pe);
	return;
fail:
	if (pe->tce32_seg >= 0)
		pe->tce32_seg = -1;
	if (tce_mem)
		__free_pages(tce_mem, get_order(tce_table_size));
}

static void pnv_ioda_setup_dma(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	unsigned int residual, remaining, segs, tw, base;
	struct pnv_ioda_pe *pe;

	/* If we have more PE# than segments available, hand out one
	 * per PE until we run out and let the rest fail. If not,
	 * then we assign at least one segment per PE, plus more based
	 * on the amount of devices under that PE
	 */
	if (phb->ioda.dma_pe_count > phb->ioda.tce32_count)
		residual = 0;
	else
		residual = phb->ioda.tce32_count -
			phb->ioda.dma_pe_count;

	pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n",
		hose->global_number, phb->ioda.tce32_count);
	pr_info("PCI: %d PE# for a total weight of %d\n",
		phb->ioda.dma_pe_count, phb->ioda.dma_weight);

	/* Walk our PE list and configure their DMA segments, hand them
	 * out one base segment plus any residual segments based on
	 * weight
	 */
	remaining = phb->ioda.tce32_count;
	tw = phb->ioda.dma_weight;
	base = 0;
	list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
		if (!pe->dma_weight)
			continue;
		if (!remaining) {
			pe_warn(pe, "No DMA32 resources available\n");
			continue;
		}
		segs = 1;
		if (residual) {
			segs += ((pe->dma_weight * residual)  + (tw / 2)) / tw;
			if (segs > remaining)
				segs = remaining;
		}

		/*
		 * For the IODA2 compliant PHB3, we needn't care about
		 * the weight. All of the available 32-bit DMA space
		 * will be assigned to the PE.
		 */
		if (phb->type == PNV_PHB_IODA1) {
			pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
				pe->dma_weight, segs);
			pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
		} else {
			pe_info(pe, "Assign DMA32 space\n");
			segs = 0;
			pnv_pci_ioda2_setup_dma_pe(phb, pe);
		}

		remaining -= segs;
		base += segs;
	}
}
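
/*
 * Worked example (assumed numbers): with tce32_count = 8, three
 * DMA-capable PEs and a total weight tw = 30, residual = 5; on IODA1 a
 * PE of weight 10 gets segs = 1 + (10 * 5 + 15) / 30 = 3 of the 256MB
 * DMA32 segments.
 */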

#ifdef CONFIG_PCI_MSI
static void pnv_ioda2_msi_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct pnv_phb *phb = container_of(chip, struct pnv_phb,
					   ioda.irq_chip);
	int64_t rc;

	rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
	WARN_ON_ONCE(rc);

	icp_native_eoi(d);
}

static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
				  unsigned int hwirq, unsigned int virq,
				  unsigned int is_64, struct msi_msg *msg)
{
	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct irq_data *idata;
	struct irq_chip *ichip;
	unsigned int xive_num = hwirq - phb->msi_base;
	__be32 data;
	int rc;

	/* No PE assigned ? bail out ... no MSI for you ! */
	if (pe == NULL)
		return -ENXIO;

	/* Check if we have an MVE */
	if (pe->mve_number < 0)
		return -ENXIO;

	/* Force 32-bit MSI on some broken devices */
	if (pdn && pdn->force_32bit_msi)
		is_64 = 0;

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
	if (rc) {
		pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
			pci_name(dev), rc, xive_num);
		return -EIO;
	}

	if (is_64) {
		__be64 addr64;

		rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr64, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = be64_to_cpu(addr64) >> 32;
		msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
	} else {
		__be32 addr32;

		rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr32, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = 0;
		msg->address_lo = be32_to_cpu(addr32);
	}
	msg->data = be32_to_cpu(data);

	/*
	 * Change the IRQ chip for the MSI interrupts on PHB3.
	 * The corresponding IRQ chip should be populated for
	 * the first time.
	 */
	if (phb->type == PNV_PHB_IODA2) {
		if (!phb->ioda.irq_chip_init) {
			idata = irq_get_irq_data(virq);
			ichip = irq_data_get_irq_chip(idata);
			phb->ioda.irq_chip_init = 1;
			phb->ioda.irq_chip = *ichip;
			phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
		}

		irq_set_chip(virq, &phb->ioda.irq_chip);
	}

	pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
		 " address=%x_%08x data=%x PE# %d\n",
		 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
		 msg->address_hi, msg->address_lo, data, pe->pe_number);

	return 0;
}
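
/*
 * Illustrative note: on PHB3 an MSI EOI must also notify the PHB via
 * opal_pci_msi_eoi(), so the code above clones the stock XICS irq_chip
 * once per PHB, swaps in pnv_ioda2_msi_eoi() as irq_eoi, and switches
 * each MSI virq over to the cloned chip.
 */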

static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
{
	unsigned int count;
	const __be32 *prop = of_get_property(phb->hose->dn,
					     "ibm,opal-msi-ranges", NULL);
	if (!prop) {
		/* BML Fallback */
		prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
	}
	if (!prop)
		return;

	phb->msi_base = be32_to_cpup(prop);
	count = be32_to_cpup(prop + 1);
	if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
		pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
		       phb->hose->global_number);
		return;
	}

	phb->msi_setup = pnv_pci_ioda_msi_setup;
	phb->msi32_support = 1;
	pr_info("  Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
		count, phb->msi_base);
}
#else
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
#endif /* CONFIG_PCI_MSI */

/*
 * This function is supposed to be called on the basis of PE from top
 * to bottom style. So the I/O or MMIO segment assigned to a
 * parent PE could be overridden by its child PEs if necessary.
 */
static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
				  struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = hose->private_data;
	struct pci_bus_region region;
	struct resource *res;
	int i, index;
	int rc;

	/*
	 * NOTE: We only care about PCI-bus-based PEs for now.
	 * PCI-device-based PEs, for example SR-IOV sensitive VFs,
	 * should be figured out later.
	 */
	BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));

	pci_bus_for_each_resource(pe->pbus, res, i) {
		if (!res || !res->flags ||
		    res->start > res->end)
			continue;

		if (res->flags & IORESOURCE_IO) {
			region.start = res->start - phb->ioda.io_pci_base;
			region.end   = res->end - phb->ioda.io_pci_base;
			index = region.start / phb->ioda.io_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.io_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping IO "
					       "segment #%d to PE#%d\n",
					       __func__, rc, index, pe->pe_number);
					break;
				}

				region.start += phb->ioda.io_segsize;
				index++;
			}
		} else if (res->flags & IORESOURCE_MEM) {
			region.start = res->start -
				       hose->mem_offset[0] -
				       phb->ioda.m32_pci_base;
			region.end   = res->end -
				       hose->mem_offset[0] -
				       phb->ioda.m32_pci_base;
			index = region.start / phb->ioda.m32_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.m32_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping M32 "
					       "segment#%d to PE#%d\n",
					       __func__, rc, index, pe->pe_number);
					break;
				}

				region.start += phb->ioda.m32_segsize;
				index++;
			}
		}
	}
}
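
/*
 * Worked example (assumed numbers): with m32_segsize = 16MB, a bus
 * window that reduces to region 0x0..0x1ffffff spans indices 0 and 1,
 * so both 16MB M32 segments get mapped to the owning PE via
 * opal_pci_map_pe_mmio_window().
 */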

static void pnv_pci_ioda_setup_seg(void)
{
	struct pci_controller *tmp, *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			pnv_ioda_setup_pe_seg(hose, pe);
		}
	}
}

static void pnv_pci_ioda_setup_DMA(void)
{
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pnv_ioda_setup_dma(hose->private_data);

		/* Mark the PHB initialization done */
		phb = hose->private_data;
		phb->initialized = 1;
	}
}

static void pnv_pci_ioda_create_dbgfs(void)
{
#ifdef CONFIG_DEBUG_FS
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;
	char name[16];

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;

		sprintf(name, "PCI%04x", hose->global_number);
		phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
		if (!phb->dbgfs)
			pr_warning("%s: Error on creating debugfs on PHB#%x\n",
				__func__, hose->global_number);
	}
#endif /* CONFIG_DEBUG_FS */
}

static void pnv_pci_ioda_fixup(void)
{
	pnv_pci_ioda_setup_PEs();
	pnv_pci_ioda_setup_seg();
	pnv_pci_ioda_setup_DMA();

	pnv_pci_ioda_create_dbgfs();

#ifdef CONFIG_EEH
	eeh_init();
	eeh_addr_cache_build();
#endif
}

/*
 * Returns the alignment for I/O or memory windows for P2P
 * bridges. That actually depends on how PEs are segmented.
 * For now, we return I/O or M32 segment size for PE sensitive
 * P2P bridges. Otherwise, the default values (4KiB for I/O,
 * 1MiB for memory) will be returned.
 *
 * The current PCI bus might be put into one PE, which was
 * created against the parent PCI bridge. In that case, we
 * needn't enlarge the alignment so that we can save some
 * resources.
 */
static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	struct pci_dev *bridge;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	int num_pci_bridges = 0;

	bridge = bus->self;
	while (bridge) {
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
			num_pci_bridges++;
			if (num_pci_bridges >= 2)
				return 1;
		}

		bridge = bridge->bus->self;
	}

	/* We fall back to M32 if M64 isn't supported */
	if (phb->ioda.m64_segsize &&
	    pnv_pci_is_mem_pref_64(type))
		return phb->ioda.m64_segsize;
	if (type & IORESOURCE_MEM)
		return phb->ioda.m32_segsize;

	return phb->ioda.io_segsize;
}
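
/*
 * Illustrative note: rounding bridge windows up to the segment size
 * keeps each bus's MMIO window aligned to whole M32/M64 segments, so
 * pnv_ioda_setup_pe_seg() can map an integral number of segments, and
 * therefore a distinct PE#, to every bus.
 */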

/* Prevent enabling devices for which we couldn't properly
 * assign a PE
 */
static int pnv_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn;

	/* The function is probably called while the PEs have
	 * not been created yet. For example, resource reassignment
	 * during PCI probing. Just skip the check if the PEs
	 * aren't ready.
	 */
	if (!phb->initialized)
		return 0;

	pdn = pci_get_pdn(dev);
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return -EINVAL;

	return 0;
}

static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
			       u32 devfn)
{
	return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
}
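
/*
 * Illustrative note: pe_rmap is filled by the "Setup reverse map" loop
 * in pnv_ioda_configure_pe(), so a RID such as 0x1a10 (bus 0x1a,
 * devfn 0x10) indexes straight to its PE# without walking the bus tree.
 */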

static void pnv_pci_ioda_shutdown(struct pnv_phb *phb)
{
	opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET,
		       OPAL_ASSERT_RESET);
}

static void __init pnv_pci_init_ioda_phb(struct device_node *np,
					 u64 hub_id, int ioda_type)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	unsigned long size, m32map_off, pemap_off, iomap_off = 0;
	const __be64 *prop64;
	const __be32 *prop32;
	int len;
	u64 phb_id;
	void *aux;
	long rc;

	pr_info("Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);

	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
	if (!prop64) {
		pr_err("  Missing \"ibm,opal-phbid\" property !\n");
		return;
	}
	phb_id = be64_to_cpup(prop64);
	pr_debug("  PHB-ID  : 0x%016llx\n", phb_id);

	phb = alloc_bootmem(sizeof(struct pnv_phb));
	if (!phb) {
		pr_err("  Out of memory !\n");
		return;
	}

	/* Allocate PCI controller */
	memset(phb, 0, sizeof(struct pnv_phb));
	phb->hose = hose = pcibios_alloc_controller(np);
	if (!phb->hose) {
		pr_err("  Can't allocate PCI controller for %s\n",
		       np->full_name);
		free_bootmem((unsigned long)phb, sizeof(struct pnv_phb));
		return;
	}

	spin_lock_init(&phb->lock);
	prop32 = of_get_property(np, "bus-range", &len);
	if (prop32 && len == 8) {
		hose->first_busno = be32_to_cpu(prop32[0]);
		hose->last_busno = be32_to_cpu(prop32[1]);
	} else {
		pr_warn("  Broken <bus-range> on %s\n", np->full_name);
		hose->first_busno = 0;
		hose->last_busno = 0xff;
	}
	hose->private_data = phb;
	phb->hub_id = hub_id;
	phb->opal_id = phb_id;
	phb->type = ioda_type;

	/* Detect specific models for error handling */
	if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
		phb->model = PNV_PHB_MODEL_P7IOC;
	else if (of_device_is_compatible(np, "ibm,power8-pciex"))
		phb->model = PNV_PHB_MODEL_PHB3;
	else
		phb->model = PNV_PHB_MODEL_UNKNOWN;

	/* Parse 32-bit and IO ranges (if any) */
	pci_process_bridge_OF_ranges(hose, np, !hose->global_number);

	/* Get registers */
	phb->regs = of_iomap(np, 0);
	if (phb->regs == NULL)
		pr_err("  Failed to map registers !\n");

	/* Initialize more IODA stuff */
	phb->ioda.total_pe = 1;
	prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
	if (prop32)
		phb->ioda.total_pe = be32_to_cpup(prop32);
	prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
	if (prop32)
		phb->ioda.reserved_pe = be32_to_cpup(prop32);

	/* Parse 64-bit MMIO range */
	pnv_ioda_parse_m64_window(phb);

	phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
	/* FW has already taken the top 64k of M32 space off (MSI space) */
	phb->ioda.m32_size += 0x10000;

	phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
	phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
	phb->ioda.io_size = hose->pci_io_size;
	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
	phb->ioda.io_pci_base = 0; /* XXX calculate this ? */

	/* Allocate aux data & arrays. We don't have IO ports on PHB3 */
	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
	m32map_off = size;
	size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
	if (phb->type == PNV_PHB_IODA1) {
		iomap_off = size;
		size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
	}
	pemap_off = size;
	size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
	aux = alloc_bootmem(size);
	memset(aux, 0, size);
	phb->ioda.pe_alloc = aux;
	phb->ioda.m32_segmap = aux + m32map_off;
	if (phb->type == PNV_PHB_IODA1)
		phb->ioda.io_segmap = aux + iomap_off;
	phb->ioda.pe_array = aux + pemap_off;
	set_bit(phb->ioda.reserved_pe, phb->ioda.pe_alloc);

	INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
	INIT_LIST_HEAD(&phb->ioda.pe_list);

	/* Calculate how many 32-bit TCE segments we have */
	phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;

#if 0 /* We should really do that ... */
	rc = opal_pci_set_phb_mem_window(opal->phb_id,
					 window_type,
					 window_num,
					 starting_real_address,
					 starting_pci_address,
					 segment_size);
#endif

	pr_info("  %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n",
		phb->ioda.total_pe, phb->ioda.reserved_pe,
		phb->ioda.m32_size, phb->ioda.m32_segsize);
	if (phb->ioda.m64_size)
		pr_info("                 M64: 0x%lx [segment=0x%lx]\n",
			phb->ioda.m64_size, phb->ioda.m64_segsize);
	if (phb->ioda.io_size)
		pr_info("                  IO: 0x%x [segment=0x%x]\n",
			phb->ioda.io_size, phb->ioda.io_segsize);


	phb->hose->ops = &pnv_pci_ops;
	phb->get_pe_state = pnv_ioda_get_pe_state;
	phb->freeze_pe = pnv_ioda_freeze_pe;
	phb->unfreeze_pe = pnv_ioda_unfreeze_pe;
#ifdef CONFIG_EEH
	phb->eeh_ops = &ioda_eeh_ops;
#endif

	/* Setup RID -> PE mapping function */
	phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;

	/* Setup TCEs */
	phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
	phb->dma_set_mask = pnv_pci_ioda_dma_set_mask;

	/* Setup shutdown function for kexec */
	phb->shutdown = pnv_pci_ioda_shutdown;

	/* Setup MSI support */
	pnv_pci_init_ioda_msis(phb);

	/*
	 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
	 * to let the PCI core do resource assignment. It's expected
	 * that the PCI core will do correct I/O and MMIO alignment
	 * for the P2P bridge BARs so that each PCI bus (excluding
	 * the child P2P bridges) can form an individual PE.
	 */
	ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
	ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
	ppc_md.pcibios_window_alignment = pnv_pci_window_alignment;
	ppc_md.pcibios_reset_secondary_bus = pnv_pci_reset_secondary_bus;
	pci_add_flags(PCI_REASSIGN_ALL_RSRC);

	/* Reset IODA tables to a clean state */
	rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
	if (rc)
		pr_warning("  OPAL Error %ld performing IODA table reset !\n", rc);

	/* If we're running in a kdump kernel, the previous kernel never
	 * shut down PCI devices correctly. We already got the IODA table
	 * cleaned out. So we have to issue a PHB reset to stop all PCI
	 * transactions from the previous kernel.
	 */
	if (is_kdump_kernel()) {
		pr_info("  Issue PHB reset ...\n");
		ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
		ioda_eeh_phb_reset(hose, OPAL_DEASSERT_RESET);
	}

	/* Configure M64 window */
	if (phb->init_m64 && phb->init_m64(phb))
		hose->mem_resources[1].flags = 0;
}

void __init pnv_pci_init_ioda2_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
}

void __init pnv_pci_init_ioda_hub(struct device_node *np)
{
	struct device_node *phbn;
	const __be64 *prop64;
	u64 hub_id;

	pr_info("Probing IODA IO-Hub %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
		return;
	}
	hub_id = be64_to_cpup(prop64);
	pr_devel(" HUB-ID : 0x%016llx\n", hub_id);

	/* Count child PHBs */
	for_each_child_of_node(np, phbn) {
		/* Look for IODA1 PHBs */
		if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
			pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);
	}
}