/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>

#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/byteorder.h>

#include <gxio/iorpc_globals.h>
#include <gxio/kiorpc.h>
#include <gxio/trio.h>
#include <gxio/iorpc_trio.h>
#include <hv/drv_trio_intf.h>

#include <arch/sim.h>

/*
 * This file contains the routines to search for PCI buses,
 * enumerate the buses, and configure any attached devices.
 */

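/* Set DEBUG_PCI_CFG to 1 to trace every config-space read and write. */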
#define DEBUG_PCI_CFG	0

#if DEBUG_PCI_CFG
#define TRACE_CFG_WR(size, val, bus, dev, func, offset) \
	pr_info("CFG WR %d-byte VAL %#x to bus %d dev %d func %d addr %u\n", \
		size, val, bus, dev, func, offset & 0xFFF);
#define TRACE_CFG_RD(size, val, bus, dev, func, offset) \
	pr_info("CFG RD %d-byte VAL %#x from bus %d dev %d func %d addr %u\n", \
		size, val, bus, dev, func, offset & 0xFFF);
#else
#define TRACE_CFG_WR(...)
#define TRACE_CFG_RD(...)
#endif

static int pci_probe = 1;

/* Information on the PCIe RC ports configuration. */
static int pcie_rc[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];

/*
 * On some platforms with one or more Gx endpoint ports, we need to
 * delay the PCIe RC port probe for a few seconds to work around
 * a HW PCIe link-training bug. The exact delay is specified with
 * a kernel boot argument in the form of "pcie_rc_delay=T,P,S",
 * where T is the TRIO instance number, P is the port number and S is
 * the delay in seconds. If the argument is specified, but the delay is
 * not provided, the value will be DEFAULT_RC_DELAY.
 */
static int rc_delay[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];

/* Default number of seconds that the PCIe RC port probe can be delayed. */
#define DEFAULT_RC_DELAY	10
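
/*
 * Example: "pcie_rc_delay=0,1,15" delays the TRIO 0 port 1 probe by
 * 15 seconds; "pcie_rc_delay=0,1" applies the 10-second default.
 */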

/* The PCI I/O space size in each PCI domain. */
#define IO_SPACE_SIZE		0x10000

/* Provide shorter versions of some very long constant names. */
#define AUTO_CONFIG_RC	\
	TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC
#define AUTO_CONFIG_RC_G1	\
	TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1
#define AUTO_CONFIG_EP	\
	TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT
#define AUTO_CONFIG_EP_G1	\
	TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1

/* Array of the PCIe ports configuration info obtained from the BIB. */
struct pcie_trio_ports_property pcie_ports[TILEGX_NUM_TRIO];

/* Number of configured TRIO instances. */
int num_trio_shims;

/* All drivers share the TRIO contexts defined here. */
gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];

/* Pointer to an array of PCIe RC controllers. */
struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
int num_rc_controllers;

static struct pci_ops tile_cfg_ops;

/* Mask of CPUs that should receive PCIe interrupts. */
static struct cpumask intr_cpus_map;

/* We don't need to worry about the alignment of resources. */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return res->start;
}
EXPORT_SYMBOL(pcibios_align_resource);

/*
 * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #.
 * For now, we simply send interrupts to non-dataplane CPUs.
 * We may implement methods to allow the user to specify the target CPUs,
 * e.g. via boot arguments.
 */
static int tile_irq_cpu(int irq)
{
	unsigned int count;
	int i = 0;
	int cpu;

	count = cpumask_weight(&intr_cpus_map);
	if (unlikely(count == 0)) {
		pr_warn("intr_cpus_map empty, interrupts will be delivered to dataplane tiles\n");
		return irq % (smp_height * smp_width);
	}

	count = irq % count;
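	/* Walk the mask to pick the (irq % count)-th allowed CPU. */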
	for_each_cpu(cpu, &intr_cpus_map) {
		if (i++ == count)
			break;
	}
	return cpu;
}

/* Open a file descriptor to the TRIO shim. */
static int tile_pcie_open(int trio_index)
{
	gxio_trio_context_t *context = &trio_contexts[trio_index];
	int ret;
	int mac;

	/* This opens a file descriptor to the TRIO shim. */
	ret = gxio_trio_init(context, trio_index);
	if (ret < 0)
		goto gxio_trio_init_failure;

	/* Allocate an ASID for the kernel. */
	ret = gxio_trio_alloc_asids(context, 1, 0, 0);
	if (ret < 0) {
		pr_err("PCI: ASID alloc failure on TRIO %d, give up\n",
			trio_index);
		goto asid_alloc_failure;
	}

	context->asid = ret;

#ifdef USE_SHARED_PCIE_CONFIG_REGION
	/*
	 * Alloc a PIO region for config access, shared by all MACs per TRIO.
	 * This shouldn't fail since the kernel is supposed to be the first
	 * client of the TRIO's PIO regions.
	 */
	ret = gxio_trio_alloc_pio_regions(context, 1, 0, 0);
	if (ret < 0) {
		pr_err("PCI: CFG PIO alloc failure on TRIO %d, give up\n",
			trio_index);
		goto pio_alloc_failure;
	}

	context->pio_cfg_index = ret;

	/*
	 * For PIO CFG, the bus_address_hi parameter is 0. The mac parameter
	 * is also 0 because it is specified in PIO_REGION_SETUP_CFG_ADDR.
	 */
	ret = gxio_trio_init_pio_region_aux(context, context->pio_cfg_index,
		0, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
	if (ret < 0) {
		pr_err("PCI: CFG PIO init failure on TRIO %d, give up\n",
			trio_index);
		goto pio_alloc_failure;
	}
#endif

	/* Get the properties of the PCIe ports on this TRIO instance. */
	ret = gxio_trio_get_port_property(context, &pcie_ports[trio_index]);
	if (ret < 0) {
		pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d, on TRIO %d\n",
		       ret, trio_index);
		goto get_port_property_failure;
	}

	context->mmio_base_mac =
		iorpc_ioremap(context->fd, 0, HV_TRIO_CONFIG_IOREMAP_SIZE);
	if (context->mmio_base_mac == NULL) {
		pr_err("PCI: TRIO config space mapping failure, error %d, on TRIO %d\n",
		       ret, trio_index);
		ret = -ENOMEM;

		goto trio_mmio_mapping_failure;
	}

	/* Check the port strap state which will override the BIB setting. */
	for (mac = 0; mac < TILEGX_TRIO_PCIES; mac++) {
		TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
		unsigned int reg_offset;

		/* Ignore ports that are not specified in the BIB. */
		if (!pcie_ports[trio_index].ports[mac].allow_rc &&
		    !pcie_ports[trio_index].ports[mac].allow_ep)
			continue;

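		/*
		 * Build the per-MAC register address: the register number
		 * goes in the REG field, the MAC interface is selected via
		 * the INTFC field, and the port number goes in MAC_SEL.
		 * The same encoding is used for per-MAC accesses below.
		 */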
		reg_offset =
			(TRIO_PCIE_INTFC_PORT_CONFIG <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		port_config.word =
			__gxio_mmio_read(context->mmio_base_mac + reg_offset);

		if (port_config.strap_state != AUTO_CONFIG_RC &&
		    port_config.strap_state != AUTO_CONFIG_RC_G1) {
			/*
			 * If this is really intended to be an EP port, record
			 * it so that the endpoint driver will know about it.
			 */
			if (port_config.strap_state == AUTO_CONFIG_EP ||
			    port_config.strap_state == AUTO_CONFIG_EP_G1)
				pcie_ports[trio_index].ports[mac].allow_ep = 1;
		}
	}

	return ret;

trio_mmio_mapping_failure:
get_port_property_failure:
asid_alloc_failure:
#ifdef USE_SHARED_PCIE_CONFIG_REGION
pio_alloc_failure:
#endif
	hv_dev_close(context->fd);
gxio_trio_init_failure:
	context->fd = -1;

	return ret;
}

static int __init tile_trio_init(void)
{
	int i;

	/* We loop over all the TRIO shims. */
	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
		if (tile_pcie_open(i) < 0)
			continue;
		num_trio_shims++;
	}

	return 0;
}
postcore_initcall(tile_trio_init);

static void tilegx_legacy_irq_ack(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
}

static void tilegx_legacy_irq_mask(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
}

static void tilegx_legacy_irq_unmask(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
}

static struct irq_chip tilegx_legacy_irq_chip = {
	.name			= "tilegx_legacy_irq",
	.irq_ack		= tilegx_legacy_irq_ack,
	.irq_mask		= tilegx_legacy_irq_mask,
	.irq_unmask		= tilegx_legacy_irq_unmask,

	/* TBD: support set_affinity. */
};

/*
 * This is a wrapper function of the kernel level-trigger interrupt
 * handler handle_level_irq() for PCI legacy interrupts. The TRIO
 * is configured such that only INTx Assert interrupts are proxied
 * to Linux which just calls handle_level_irq() after clearing the
 * MAC INTx Assert status bit associated with this interrupt.
 */
static void trio_handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	struct pci_controller *controller = irq_desc_get_handler_data(desc);
	gxio_trio_context_t *trio_context = controller->trio;
	uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc);
	int mac = controller->mac;
	unsigned int reg_offset;
	uint64_t level_mask;

	handle_level_irq(irq, desc);

	/*
	 * Clear the INTx Level status, otherwise future interrupts are
	 * not sent.
	 */
	reg_offset = (TRIO_PCIE_INTFC_MAC_INT_STS <<
		TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
		TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	level_mask = TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK << intx;

	__gxio_mmio_write(trio_context->mmio_base_mac + reg_offset, level_mask);
}

/*
 * Create kernel irqs and set up the handlers for the legacy interrupts.
 * Also some minimum initialization for the MSI support.
 */
static int tile_init_irqs(struct pci_controller *controller)
{
	int i;
	int j;
	int irq;
	int result;

	cpumask_copy(&intr_cpus_map, cpu_online_mask);

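	/* Set up one IRQ per legacy INTx pin, INTA through INTD. */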
	for (i = 0; i < 4; i++) {
		gxio_trio_context_t *context = controller->trio;
		int cpu;

		/* Ask the kernel to allocate an IRQ. */
		irq = irq_alloc_hwirq(-1);
		if (!irq) {
			pr_err("PCI: no free irq vectors, failed for %d\n", i);
			goto free_irqs;
		}
		controller->irq_intx_table[i] = irq;

		/* Distribute the 4 IRQs to different tiles. */
		cpu = tile_irq_cpu(irq);

		/* Configure the TRIO intr binding for this IRQ. */
		result = gxio_trio_config_legacy_intr(context, cpu_x(cpu),
						      cpu_y(cpu), KERNEL_PL,
						      irq, controller->mac, i);
		if (result < 0) {
			pr_err("PCI: MAC intx config failed for %d\n", i);

			goto free_irqs;
		}

		/* Register the IRQ handler with the kernel. */
		irq_set_chip_and_handler(irq, &tilegx_legacy_irq_chip,
					trio_handle_level_irq);
		irq_set_chip_data(irq, (void *)(uint64_t)i);
		irq_set_handler_data(irq, controller);
	}

	return 0;

free_irqs:
	for (j = 0; j < i; j++)
		irq_free_hwirq(controller->irq_intx_table[j]);

	return -1;
}

/*
 * Return 1 if the port is strapped to operate in RC mode.
 */
static int
strapped_for_rc(gxio_trio_context_t *trio_context, int mac)
{
	TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
	unsigned int reg_offset;

	/* Check the port configuration. */
	reg_offset =
		(TRIO_PCIE_INTFC_PORT_CONFIG <<
			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
	port_config.word =
		__gxio_mmio_read(trio_context->mmio_base_mac + reg_offset);

	if (port_config.strap_state == AUTO_CONFIG_RC ||
	    port_config.strap_state == AUTO_CONFIG_RC_G1)
		return 1;
	else
		return 0;
}

/*
 * Find valid controllers and fill in pci_controller structs for each
 * of them.
 *
 * Return the number of controllers discovered.
 */
int __init tile_pci_init(void)
{
	int ctl_index = 0;
	int i, j;

	if (!pci_probe) {
		pr_info("PCI: disabled by boot argument\n");
		return 0;
	}

	pr_info("PCI: Searching for controllers...\n");

	if (num_trio_shims == 0 || sim_is_simulator())
		return 0;

	/*
	 * Now determine which PCIe ports are configured to operate in RC
	 * mode. There is a difference in the port configuration capability
	 * between the Gx36 and Gx72 devices.
	 *
	 * The Gx36 has configuration capability for each of the 3 PCIe
	 * interfaces (disable, auto endpoint, auto RC, etc.).
	 * On the Gx72, you can only select one of the 3 PCIe interfaces per
	 * TRIO to train automatically. Further, the allowable training modes
	 * are reduced to four options (auto endpoint, auto RC, stream x1,
	 * stream x4).
	 *
	 * For Gx36 ports, it must be allowed to be in RC mode by the
	 * Board Information Block, and the hardware strapping pins must be
	 * set to RC mode.
	 *
	 * For Gx72 ports, the port will operate in RC mode if either of the
	 * following is true:
	 * 1. It is allowed to be in RC mode by the Board Information Block,
	 *    and the BIB doesn't allow the EP mode.
	 * 2. It is allowed to be in either the RC or the EP mode by the BIB,
	 *    and the hardware strapping pin is set to RC mode.
	 */
	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
		gxio_trio_context_t *context = &trio_contexts[i];

		if (context->fd < 0)
			continue;

		for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
			int is_rc = 0;

			if (pcie_ports[i].is_gx72 &&
			    pcie_ports[i].ports[j].allow_rc) {
				if (!pcie_ports[i].ports[j].allow_ep ||
				    strapped_for_rc(context, j))
					is_rc = 1;
			} else if (pcie_ports[i].ports[j].allow_rc &&
				   strapped_for_rc(context, j)) {
				is_rc = 1;
			}
			if (is_rc) {
				pcie_rc[i][j] = 1;
				num_rc_controllers++;
			}
		}
	}

	/* Return if no PCIe ports are configured to operate in RC mode. */
	if (num_rc_controllers == 0)
		return 0;

	/* Set the TRIO pointer and MAC index for each PCIe RC port. */
	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
		for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
			if (pcie_rc[i][j]) {
				pci_controllers[ctl_index].trio =
					&trio_contexts[i];
				pci_controllers[ctl_index].mac = j;
				pci_controllers[ctl_index].trio_index = i;
				ctl_index++;
				if (ctl_index == num_rc_controllers)
					goto out;
			}
		}
	}

out:
	/* Configure each PCIe RC port. */
	for (i = 0; i < num_rc_controllers; i++) {

		/* Configure the PCIe MAC to run in RC mode. */
		struct pci_controller *controller = &pci_controllers[i];

		controller->index = i;
		controller->ops = &tile_cfg_ops;

		controller->io_space.start = PCIBIOS_MIN_IO +
			(i * IO_SPACE_SIZE);
		controller->io_space.end = controller->io_space.start +
			IO_SPACE_SIZE - 1;
		BUG_ON(controller->io_space.end > IO_SPACE_LIMIT);
		controller->io_space.flags = IORESOURCE_IO;
		snprintf(controller->io_space_name,
			 sizeof(controller->io_space_name),
			 "PCI I/O domain %d", i);
		controller->io_space.name = controller->io_space_name;

		/*
		 * The PCI memory resource is located above the PA space.
		 * For every host bridge, the BAR window or the MMIO aperture
		 * is in range [3GB, 4GB - 1] of a 4GB space beyond the
		 * PA space.
		 */
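		/*
		 * For example, if TILE_PCI_BAR_WINDOW_TOP is 4GB and
		 * TILE_PCI_BAR_WINDOW_SIZE is 1GB (the [3GB, 4GB - 1]
		 * aperture described above), domain 0's window would be
		 * [TILE_PCI_MEM_START + 3GB, TILE_PCI_MEM_START + 4GB - 1].
		 */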
		controller->mem_offset = TILE_PCI_MEM_START +
			(i * TILE_PCI_BAR_WINDOW_TOP);
		controller->mem_space.start = controller->mem_offset +
			TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE;
		controller->mem_space.end = controller->mem_offset +
			TILE_PCI_BAR_WINDOW_TOP - 1;
		controller->mem_space.flags = IORESOURCE_MEM;
		snprintf(controller->mem_space_name,
			 sizeof(controller->mem_space_name),
			 "PCI mem domain %d", i);
		controller->mem_space.name = controller->mem_space_name;
	}

	return num_rc_controllers;
}

/*
 * (pin - 1) converts from the PCI standard's [1:4] convention to
 * a normal [0:3] range.
 */
static int tile_map_irq(const struct pci_dev *dev, u8 device, u8 pin)
{
	struct pci_controller *controller =
		(struct pci_controller *)dev->sysdata;
	return controller->irq_intx_table[pin - 1];
}

static void fixup_read_and_payload_sizes(struct pci_controller *controller)
{
	gxio_trio_context_t *trio_context = controller->trio;
	struct pci_bus *root_bus = controller->root_bus;
	TRIO_PCIE_RC_DEVICE_CONTROL_t dev_control;
	TRIO_PCIE_RC_DEVICE_CAP_t rc_dev_cap;
	unsigned int reg_offset;
	struct pci_bus *child;
	int mac;
	int err;

	mac = controller->mac;

	/* Set our max read request size to be 4KB. */
	reg_offset =
		(TRIO_PCIE_RC_DEVICE_CONTROL <<
			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
					      reg_offset);
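	/* Encoding 5 selects a 4096-byte (128B << 5) max read request. */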
	dev_control.max_read_req_sz = 5;
	__gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
			    dev_control.word);

	/*
	 * Set the max payload size supported by this Gx PCIe MAC.
	 * Though Gx PCIe supports Max Payload Size of up to 1024 bytes,
	 * experiments have shown that setting MPS to 256 yields the
	 * best performance.
	 */
	reg_offset =
		(TRIO_PCIE_RC_DEVICE_CAP <<
			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	rc_dev_cap.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
					     reg_offset);
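	/* Encoding 1 advertises a 256-byte (128B << 1) max payload size. */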
	rc_dev_cap.mps_sup = 1;
	__gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
			    rc_dev_cap.word);

	/* Configure PCI Express MPS setting. */
	list_for_each_entry(child, &root_bus->children, node)
		pcie_bus_configure_settings(child);

	/*
	 * Set the mac_config register in trio based on the MPS/MRS of the link.
	 */
	reg_offset =
		(TRIO_PCIE_RC_DEVICE_CONTROL <<
			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
						reg_offset);

	err = gxio_trio_set_mps_mrs(trio_context,
				    dev_control.max_payload_size,
				    dev_control.max_read_req_sz,
				    mac);
	if (err < 0) {
		pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, MAC %d on TRIO %d\n",
		       mac, controller->trio_index);
	}
}

static int setup_pcie_rc_delay(char *str)
{
	unsigned long delay = 0;
	unsigned long trio_index;
	unsigned long mac;

	if (str == NULL || !isdigit(*str))
		return -EINVAL;
	trio_index = simple_strtoul(str, (char **)&str, 10);
	if (trio_index >= TILEGX_NUM_TRIO)
		return -EINVAL;

	if (*str != ',')
		return -EINVAL;

	str++;
	if (!isdigit(*str))
		return -EINVAL;
	mac = simple_strtoul(str, (char **)&str, 10);
	if (mac >= TILEGX_TRIO_PCIES)
		return -EINVAL;

	if (*str != '\0') {
		if (*str != ',')
			return -EINVAL;

		str++;
		if (!isdigit(*str))
			return -EINVAL;
		delay = simple_strtoul(str, (char **)&str, 10);
	}

	rc_delay[trio_index][mac] = delay ? : DEFAULT_RC_DELAY;
	return 0;
}
early_param("pcie_rc_delay", setup_pcie_rc_delay);

/* PCI initialization entry point, called by subsys_initcall. */
int __init pcibios_init(void)
{
	resource_size_t offset;
	LIST_HEAD(resources);
	int next_busno;
	int i;

	tile_pci_init();

	if (num_rc_controllers == 0)
		return 0;

	/*
	 * Delay a bit in case devices aren't ready.  Some devices are
	 * known to require at least 20ms here, but we use a more
	 * conservative value.
	 */
	msleep(250);

	/* Scan all of the recorded PCI controllers.  */
	for (next_busno = 0, i = 0; i < num_rc_controllers; i++) {
		struct pci_controller *controller = &pci_controllers[i];
		gxio_trio_context_t *trio_context = controller->trio;
		TRIO_PCIE_INTFC_PORT_STATUS_t port_status;
		TRIO_PCIE_INTFC_TX_FIFO_CTL_t tx_fifo_ctl;
		struct pci_bus *bus;
		unsigned int reg_offset;
		unsigned int class_code_revision;
		int trio_index;
		int mac;
		int ret;

		if (trio_context->fd < 0)
			continue;

		trio_index = controller->trio_index;
		mac = controller->mac;

		/*
		 * Check for PCIe link-up status to decide if we need
		 * to force the link to come up.
		 */
		reg_offset =
			(TRIO_PCIE_INTFC_PORT_STATUS <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		port_status.word =
			__gxio_mmio_read(trio_context->mmio_base_mac +
					 reg_offset);
		if (!port_status.dl_up) {
			if (rc_delay[trio_index][mac]) {
				pr_info("Delaying PCIe RC TRIO init %d sec on MAC %d on TRIO %d\n",
					rc_delay[trio_index][mac], mac,
					trio_index);
				msleep(rc_delay[trio_index][mac] * 1000);
			}
			ret = gxio_trio_force_rc_link_up(trio_context, mac);
			if (ret < 0)
				pr_err("PCI: PCIE_FORCE_LINK_UP failure, MAC %d on TRIO %d\n",
				       mac, trio_index);
		}

		pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n",
			i, trio_index, controller->mac);

		/* Delay the bus probe if needed. */
		if (rc_delay[trio_index][mac]) {
			pr_info("Delaying PCIe RC bus enumeration %d sec on MAC %d on TRIO %d\n",
				rc_delay[trio_index][mac], mac, trio_index);
			msleep(rc_delay[trio_index][mac] * 1000);
		} else {
			/*
			 * Wait a bit here because some EP devices
			 * take longer to come up.
			 */
			msleep(1000);
		}

		/* Check for PCIe link-up status again. */
		port_status.word =
			__gxio_mmio_read(trio_context->mmio_base_mac +
					 reg_offset);
		if (!port_status.dl_up) {
			if (pcie_ports[trio_index].ports[mac].removable) {
				pr_info("PCI: link is down, MAC %d on TRIO %d\n",
					mac, trio_index);
				pr_info("This is expected if no PCIe card is connected to this link\n");
			} else
				pr_err("PCI: link is down, MAC %d on TRIO %d\n",
				       mac, trio_index);
			continue;
		}

		/*
		 * Ensure that the link can come out of L1 power down state.
		 * Strictly speaking, this is needed only in the case of
		 * heavy RC-initiated DMAs.
		 */
		reg_offset =
			(TRIO_PCIE_INTFC_TX_FIFO_CTL <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
		tx_fifo_ctl.word =
			__gxio_mmio_read(trio_context->mmio_base_mac +
					 reg_offset);
		tx_fifo_ctl.min_p_credits = 0;
		__gxio_mmio_write(trio_context->mmio_base_mac + reg_offset,
				  tx_fifo_ctl.word);

		/*
		 * Change the device ID so that Linux bus crawl doesn't confuse
		 * the internal bridge with any Tilera endpoints.
		 */
		reg_offset =
			(TRIO_PCIE_RC_DEVICE_ID_VEN_ID <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		__gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
				    (TILERA_GX36_RC_DEV_ID <<
				    TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT) |
				    TILERA_VENDOR_ID);

		/* Set the internal P2P bridge class code. */
		reg_offset =
			(TRIO_PCIE_RC_REVISION_ID <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		class_code_revision =
			__gxio_mmio_read32(trio_context->mmio_base_mac +
					   reg_offset);
		class_code_revision = (class_code_revision & 0xff) |
			(PCI_CLASS_BRIDGE_PCI << 16);

		__gxio_mmio_write32(trio_context->mmio_base_mac +
				    reg_offset, class_code_revision);

#ifdef USE_SHARED_PCIE_CONFIG_REGION

		/* Map in the MMIO space for the PIO region. */
		offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index) |
			(((unsigned long long)mac) <<
			TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);

#else

		/* Alloc a PIO region for PCI config access per MAC. */
		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
		if (ret < 0) {
			pr_err("PCI: PCI CFG PIO alloc failure for mac %d on TRIO %d, give up\n",
			       mac, trio_index);

			continue;
		}

		trio_context->pio_cfg_index[mac] = ret;

		/* For PIO CFG, the bus_address_hi parameter is 0. */
		ret = gxio_trio_init_pio_region_aux(trio_context,
			trio_context->pio_cfg_index[mac],
			mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
		if (ret < 0) {
			pr_err("PCI: PCI CFG PIO init failure for mac %d on TRIO %d, give up\n",
			       mac, trio_index);

			continue;
		}

		offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index[mac]) |
			(((unsigned long long)mac) <<
			TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);

#endif

		/*
		 * To save VMALLOC space, we take advantage of the fact that
		 * bit 29 in the PIO CFG address format is reserved 0. With
		 * TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT being 30,
		 * this cuts VMALLOC space usage from 1GB to 512MB per mac.
		 */
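		/* 1UL << (30 - 1) maps 512MB of VA for this MAC. */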
		trio_context->mmio_base_pio_cfg[mac] =
			iorpc_ioremap(trio_context->fd, offset, (1UL <<
			(TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT - 1)));
		if (trio_context->mmio_base_pio_cfg[mac] == NULL) {
			pr_err("PCI: PIO map failure for mac %d on TRIO %d\n",
			       mac, trio_index);

			continue;
		}

		/* Initialize the PCIe interrupts. */
		if (tile_init_irqs(controller)) {
			pr_err("PCI: IRQs init failure for mac %d on TRIO %d\n",
				mac, trio_index);

			continue;
		}

		/*
		 * The PCI memory resource is located above the PA space.
		 * The memory range for the PCI root bus should not overlap
		 * with the physical RAM.
		 */
		pci_add_resource_offset(&resources, &controller->mem_space,
					controller->mem_offset);
		pci_add_resource(&resources, &controller->io_space);
		controller->first_busno = next_busno;
		bus = pci_scan_root_bus(NULL, next_busno, controller->ops,
					controller, &resources);
		controller->root_bus = bus;
		next_busno = bus->busn_res.end + 1;
	}

	/* Do machine dependent PCI interrupt routing */
	pci_fixup_irqs(pci_common_swizzle, tile_map_irq);

	/*
	 * This comes from the generic Linux PCI driver.
	 *
	 * It allocates all of the resources (I/O memory, etc)
	 * associated with the devices read in above.
	 */
	pci_assign_unassigned_resources();

	/* Record the I/O resources in the PCI controller structure. */
	for (i = 0; i < num_rc_controllers; i++) {
		struct pci_controller *controller = &pci_controllers[i];
		gxio_trio_context_t *trio_context = controller->trio;
		struct pci_bus *root_bus = pci_controllers[i].root_bus;
		int ret;
		int j;

		/*
		 * Skip controllers that are not properly initialized or
		 * have down links.
		 */
		if (root_bus == NULL)
			continue;

		/* Configure the max_payload_size values for this domain. */
		fixup_read_and_payload_sizes(controller);

		/* Alloc a PIO region for PCI memory access for each RC port. */
		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
		if (ret < 0) {
			pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, give up\n",
			       controller->trio_index, controller->mac);

			continue;
		}

		controller->pio_mem_index = ret;

		/*
		 * For PIO MEM, the bus_address_hi parameter is hard-coded 0
		 * because we always assign 32-bit PCI bus BAR ranges.
		 */
		ret = gxio_trio_init_pio_region_aux(trio_context,
						    controller->pio_mem_index,
						    controller->mac,
						    0,
						    0);
		if (ret < 0) {
			pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, give up\n",
			       controller->trio_index, controller->mac);

			continue;
		}

#ifdef CONFIG_TILE_PCI_IO
		/*
		 * Alloc a PIO region for PCI I/O space access for each RC port.
		 */
		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
		if (ret < 0) {
			pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, give up\n",
			       controller->trio_index, controller->mac);

			continue;
		}

		controller->pio_io_index = ret;

		/*
		 * For PIO IO, the bus_address_hi parameter is hard-coded 0
		 * because PCI I/O address space is 32-bit.
		 */
		ret = gxio_trio_init_pio_region_aux(trio_context,
						    controller->pio_io_index,
						    controller->mac,
						    0,
						    HV_TRIO_PIO_FLAG_IO_SPACE);
		if (ret < 0) {
			pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, give up\n",
			       controller->trio_index, controller->mac);

			continue;
		}
#endif

		/*
		 * Configure a Mem-Map region for each memory controller so
		 * that Linux can map all of its PA space to the PCI bus.
		 * Use the IOMMU to handle hash-for-home memory.
		 */
		for_each_online_node(j) {
			unsigned long start_pfn = node_start_pfn[j];
			unsigned long end_pfn = node_end_pfn[j];
			unsigned long nr_pages = end_pfn - start_pfn;

			ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0,
							  0);
			if (ret < 0) {
				pr_err("PCI: Mem-Map alloc failure on TRIO %d mac %d for MC %d, give up\n",
				       controller->trio_index, controller->mac,
				       j);

				goto alloc_mem_map_failed;
			}

			controller->mem_maps[j] = ret;

			/*
			 * Initialize the Mem-Map and the I/O MMU so that all
			 * the physical memory can be accessed by the endpoint
			 * devices. The base bus address is set to the base CPA
			 * of this memory controller plus an offset (see pci.h).
			 * The region's base VA is set to the base CPA. The
			 * I/O MMU table essentially translates the CPA to
			 * the real PA. Implicitly, for node 0, we create
			 * a separate Mem-Map region that serves as the inbound
			 * window for legacy 32-bit devices. This is a direct
			 * map of the low 4GB CPA space.
			 */
			ret = gxio_trio_init_memory_map_mmu_aux(trio_context,
				controller->mem_maps[j],
				start_pfn << PAGE_SHIFT,
				nr_pages << PAGE_SHIFT,
				trio_context->asid,
				controller->mac,
				(start_pfn << PAGE_SHIFT) +
				TILE_PCI_MEM_MAP_BASE_OFFSET,
				j,
				GXIO_TRIO_ORDER_MODE_UNORDERED);
			if (ret < 0) {
				pr_err("PCI: Mem-Map init failure on TRIO %d mac %d for MC %d, give up\n",
				       controller->trio_index, controller->mac,
				       j);

				goto alloc_mem_map_failed;
			}
			continue;

alloc_mem_map_failed:
			break;
		}
	}

	return 0;
}
subsys_initcall(pcibios_init);

/* No bus fixups needed. */
void pcibios_fixup_bus(struct pci_bus *bus)
{
}

/* Process any "pci=" kernel boot arguments. */
char *__init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		pci_probe = 0;
		return NULL;
	}
	return str;
}

/*
 * Called for each device after PCI setup is done.
 * We initialize the PCI device capabilities conservatively, assuming that
 * all devices can only address the 32-bit DMA space. The exception here is
 * that the device dma_offset is set to the value that matches the 64-bit
 * capable devices. This is OK because dma_offset is not used by legacy
 * dma_ops, nor by the hybrid dma_ops's streaming DMAs, which are 64-bit ops.
 * This implementation matches the kernel design of setting PCI devices'
 * coherent_dma_mask to 0xffffffffull by default, allowing the device drivers
 * to skip calling pci_set_consistent_dma_mask(DMA_BIT_MASK(32)).
 */
static void pcibios_fixup_final(struct pci_dev *pdev)
{
	set_dma_ops(&pdev->dev, gx_legacy_pci_dma_map_ops);
	set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET);
	pdev->dev.archdata.max_direct_dma_addr =
		TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
	pdev->dev.coherent_dma_mask = TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);

/* Map a PCI MMIO bus address into VA space. */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	struct pci_controller *controller = NULL;
	resource_size_t bar_start;
	resource_size_t bar_end;
	resource_size_t offset;
	resource_size_t start;
	resource_size_t end;
	int trio_fd;
	int i;

	start = phys_addr;
	end = phys_addr + size - 1;

	/*
	 * By searching phys_addr in each controller's mem_space, we can
	 * determine the controller that should accept the PCI memory access.
	 */
	for (i = 0; i < num_rc_controllers; i++) {
		/*
		 * Skip controllers that are not properly initialized or
		 * have down links.
		 */
		if (pci_controllers[i].root_bus == NULL)
			continue;

		bar_start = pci_controllers[i].mem_space.start;
		bar_end = pci_controllers[i].mem_space.end;

		if ((start >= bar_start) && (end <= bar_end)) {
			controller = &pci_controllers[i];
			break;
		}
	}

	if (controller == NULL)
		return NULL;

	trio_fd = controller->trio->fd;

	/* Convert the resource start to the bus address offset. */
	start = phys_addr - controller->mem_offset;

	offset = HV_TRIO_PIO_OFFSET(controller->pio_mem_index) + start;

	/* We need to keep the PCI bus address's in-page offset in the VA. */
	return iorpc_ioremap(trio_fd, offset, size) +
		(start & (PAGE_SIZE - 1));
}
EXPORT_SYMBOL(ioremap);

#ifdef CONFIG_TILE_PCI_IO
/* Map a PCI I/O address into VA space. */
void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	struct pci_controller *controller = NULL;
	resource_size_t bar_start;
	resource_size_t bar_end;
	resource_size_t offset;
	resource_size_t start;
	resource_size_t end;
	int trio_fd;
	int i;

	start = port;
	end = port + size - 1;

	/*
	 * By searching the port in each controller's io_space, we can
	 * determine the controller that should accept the PCI I/O access.
	 */
	for (i = 0; i < num_rc_controllers; i++) {
		/*
		 * Skip controllers that are not properly initialized or
		 * have down links.
		 */
		if (pci_controllers[i].root_bus == NULL)
			continue;

		bar_start = pci_controllers[i].io_space.start;
		bar_end = pci_controllers[i].io_space.end;

		if ((start >= bar_start) && (end <= bar_end)) {
			controller = &pci_controllers[i];
			break;
		}
	}

	if (controller == NULL)
		return NULL;

	trio_fd = controller->trio->fd;

	/* Convert the resource start to the bus address offset. */
	port -= controller->io_space.start;

	offset = HV_TRIO_PIO_OFFSET(controller->pio_io_index) + port;

	/* We need to keep the PCI bus address's in-page offset in the VA. */
	return iorpc_ioremap(trio_fd, offset, size) + (port & (PAGE_SIZE - 1));
}
EXPORT_SYMBOL(ioport_map);

void ioport_unmap(void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL(ioport_unmap);
#endif

void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL(pci_iounmap);

/****************************************************************
 *
 * Tile PCI config space read/write routines
 *
 ****************************************************************/

/*
 * These are the normal read and write ops.
 * They are expanded with macros from pci_bus_read_config_byte() etc.
 *
 * devfn is the combined PCI device & function.
 *
 * offset is in bytes, from the start of config space for the
 * specified bus & device.
 */
static int tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset,
			 int size, u32 *val)
{
	struct pci_controller *controller = bus->sysdata;
	gxio_trio_context_t *trio_context = controller->trio;
	int busnum = bus->number & 0xff;
	int device = PCI_SLOT(devfn);
	int function = PCI_FUNC(devfn);
	int config_type = 1;
	TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
	void *mmio_addr;

	/*
	 * Map all accesses to the local device on the root bus into the
	 * MMIO space of the MAC. Accesses to the downstream devices
	 * go to the PIO space.
	 */
	if (pci_is_root_bus(bus)) {
		if (device == 0) {
			/*
			 * This is the internal downstream P2P bridge,
			 * access directly.
			 */
			unsigned int reg_offset;

			reg_offset = ((offset & 0xFFF) <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
				(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
				<< TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
				(controller->mac <<
					TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

			mmio_addr = trio_context->mmio_base_mac + reg_offset;

			goto valid_device;

		} else {
			/*
			 * We fake an empty device for (device > 0),
			 * since there is only one device on bus 0.
			 */
			goto invalid_device;
		}
	}

	/*
	 * Accesses to the directly attached device have to be
	 * sent as type-0 configs.
	 */
	if (busnum == (controller->first_busno + 1)) {
		/*
		 * There is only one device off of our built-in P2P bridge.
		 */
		if (device != 0)
			goto invalid_device;

		config_type = 0;
	}

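	/*
	 * Build the PIO address in PCIe config-TLP form: the 12-bit
	 * register offset plus the function, device and bus numbers,
	 * with type 0 for the directly attached device and type 1 for
	 * accesses that the bridge must forward downstream.
	 */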
	cfg_addr.word = 0;
	cfg_addr.reg_addr = (offset & 0xFFF);
	cfg_addr.fn = function;
	cfg_addr.dev = device;
	cfg_addr.bus = busnum;
	cfg_addr.type = config_type;

	/*
	 * Note that we don't set the mac field in cfg_addr because the
	 * mapping is per port.
	 */
	mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
		cfg_addr.word;

valid_device:

	switch (size) {
	case 4:
		*val = __gxio_mmio_read32(mmio_addr);
		break;

	case 2:
		*val = __gxio_mmio_read16(mmio_addr);
		break;

	case 1:
		*val = __gxio_mmio_read8(mmio_addr);
		break;

	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	TRACE_CFG_RD(size, *val, busnum, device, function, offset);

	return 0;

invalid_device:

	switch (size) {
	case 4:
		*val = 0xFFFFFFFF;
		break;

	case 2:
		*val = 0xFFFF;
		break;

	case 1:
		*val = 0xFF;
		break;

	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return 0;
}

/*
 * See tile_cfg_read() for relevant comments.
 * Note that "val" is the value to write, not a pointer to that value.
 */
static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset,
			  int size, u32 val)
{
	struct pci_controller *controller = bus->sysdata;
	gxio_trio_context_t *trio_context = controller->trio;
	int busnum = bus->number & 0xff;
	int device = PCI_SLOT(devfn);
	int function = PCI_FUNC(devfn);
	int config_type = 1;
	TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
	void *mmio_addr;
	u32 val_32 = (u32)val;
	u16 val_16 = (u16)val;
	u8 val_8 = (u8)val;

	/*
	 * Map all accesses to the local device on the root bus into the
	 * MMIO space of the MAC. Accesses to the downstream devices
	 * go to the PIO space.
	 */
	if (pci_is_root_bus(bus)) {
		if (device == 0) {
			/*
			 * This is the internal downstream P2P bridge,
			 * access directly.
			 */
			unsigned int reg_offset;

			reg_offset = ((offset & 0xFFF) <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
				(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
				<< TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
				(controller->mac <<
					TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

			mmio_addr = trio_context->mmio_base_mac + reg_offset;

			goto valid_device;

		} else {
			/*
			 * We fake an empty device for (device > 0),
			 * since there is only one device on bus 0.
			 */
			goto invalid_device;
		}
	}

	/*
	 * Accesses to the directly attached device have to be
	 * sent as type-0 configs.
	 */
	if (busnum == (controller->first_busno + 1)) {
		/*
		 * There is only one device off of our built-in P2P bridge.
		 */
		if (device != 0)
			goto invalid_device;

		config_type = 0;
	}

	cfg_addr.word = 0;
	cfg_addr.reg_addr = (offset & 0xFFF);
	cfg_addr.fn = function;
	cfg_addr.dev = device;
	cfg_addr.bus = busnum;
	cfg_addr.type = config_type;

	/*
	 * Note that we don't set the mac field in cfg_addr because the
	 * mapping is per port.
	 */
	mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
			cfg_addr.word;

valid_device:

	switch (size) {
	case 4:
		__gxio_mmio_write32(mmio_addr, val_32);
		TRACE_CFG_WR(size, val_32, busnum, device, function, offset);
		break;

	case 2:
		__gxio_mmio_write16(mmio_addr, val_16);
		TRACE_CFG_WR(size, val_16, busnum, device, function, offset);
		break;

	case 1:
		__gxio_mmio_write8(mmio_addr, val_8);
		TRACE_CFG_WR(size, val_8, busnum, device, function, offset);
		break;

	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

invalid_device:

	return 0;
}

static struct pci_ops tile_cfg_ops = {
	.read =         tile_cfg_read,
	.write =        tile_cfg_write,
};

/* MSI support starts here. */
static unsigned int tilegx_msi_startup(struct irq_data *d)
{
	if (d->msi_desc)
		pci_msi_unmask_irq(d);

	return 0;
}

static void tilegx_msi_ack(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
}

static void tilegx_msi_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	__insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
}

static void tilegx_msi_unmask(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
	pci_msi_unmask_irq(d);
}

static struct irq_chip tilegx_msi_chip = {
	.name			= "tilegx_msi",
	.irq_startup		= tilegx_msi_startup,
	.irq_ack		= tilegx_msi_ack,
	.irq_mask		= tilegx_msi_mask,
	.irq_unmask		= tilegx_msi_unmask,

	/* TBD: support set_affinity. */
};

int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	struct pci_controller *controller;
	gxio_trio_context_t *trio_context;
	struct msi_msg msg;
	int default_irq;
	uint64_t mem_map_base;
	uint64_t mem_map_limit;
	u64 msi_addr;
	int mem_map;
	int cpu;
	int irq;
	int ret;

	irq = irq_alloc_hwirq(-1);
	if (!irq)
		return -ENOSPC;

	/*
	 * Since we use a 64-bit Mem-Map to accept the MSI write, we fail
	 * devices that are not capable of generating a 64-bit message address.
	 * These devices will fall back to using the legacy interrupts.
	 * Most PCIe endpoint devices do support 64-bit message addressing.
	 */
	if (desc->msi_attrib.is_64 == 0) {
		dev_info(&pdev->dev, "64-bit MSI message address not supported, falling back to legacy interrupts\n");

		ret = -ENOMEM;
		goto is_64_failure;
	}

	default_irq = desc->msi_attrib.default_irq;
	controller = irq_get_handler_data(default_irq);

	BUG_ON(!controller);

	trio_context = controller->trio;

	/*
	 * Allocate a scatter-queue that will accept the MSI write and
	 * trigger the TILE-side interrupts. We use the scatter-queue regions
	 * before the mem map regions, because the latter are needed by more
	 * applications.
	 */
	mem_map = gxio_trio_alloc_scatter_queues(trio_context, 1, 0, 0);
	if (mem_map >= 0) {
		TRIO_MAP_SQ_DOORBELL_FMT_t doorbell_template = {{
			.pop = 0,
			.doorbell = 1,
		}};

		mem_map += TRIO_NUM_MAP_MEM_REGIONS;
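		/*
		 * Scatter-queue indices appear to be numbered after the
		 * TRIO_NUM_MAP_MEM_REGIONS Mem-Map indices, so both region
		 * types share the mem_map numbering used below.
		 */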
		mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
			mem_map * MEM_MAP_INTR_REGION_SIZE;
		mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;

		msi_addr = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 8;
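		/*
		 * The MSI write targets the last 8 bytes of the region,
		 * presumably the scatter-queue doorbell, matching the
		 * doorbell-format message data set below.
		 */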
		msg.data = (unsigned int)doorbell_template.word;
	} else {
		/* SQ regions are out, allocate from map mem regions. */
		mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0);
		if (mem_map < 0) {
			dev_info(&pdev->dev, "%s Mem-Map alloc failure - failed to initialize MSI interrupts - falling back to legacy interrupts\n",
				 desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
			ret = -ENOMEM;
			goto msi_mem_map_alloc_failure;
		}
		mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
			mem_map * MEM_MAP_INTR_REGION_SIZE;
		mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;

		msi_addr = mem_map_base + TRIO_MAP_MEM_REG_INT3 -
			TRIO_MAP_MEM_REG_INT0;

		msg.data = mem_map;
	}

	/* We try to distribute different IRQs to different tiles. */
	cpu = tile_irq_cpu(irq);

	/*
	 * Now call up to the HV to configure the MSI interrupt and
	 * set up the IPI binding.
	 */
	ret = gxio_trio_config_msi_intr(trio_context, cpu_x(cpu), cpu_y(cpu),
					KERNEL_PL, irq, controller->mac,
					mem_map, mem_map_base, mem_map_limit,
					trio_context->asid);
	if (ret < 0) {
		dev_info(&pdev->dev, "HV MSI config failed\n");

		goto hv_msi_config_failure;
	}

	irq_set_msi_desc(irq, desc);

	msg.address_hi = msi_addr >> 32;
	msg.address_lo = msi_addr & 0xffffffff;

	pci_write_msi_msg(irq, &msg);
	irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq);
	irq_set_handler_data(irq, controller);

	return 0;

hv_msi_config_failure:
	/* Free mem-map */
msi_mem_map_alloc_failure:
is_64_failure:
	irq_free_hwirq(irq);
	return ret;
}

void arch_teardown_msi_irq(unsigned int irq)
{
	irq_free_hwirq(irq);
}