/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>

#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/byteorder.h>

#include <gxio/iorpc_globals.h>
#include <gxio/kiorpc.h>
#include <gxio/trio.h>
#include <gxio/iorpc_trio.h>
#include <hv/drv_trio_intf.h>

#include <arch/sim.h>

/*
 * This file contains the routines to search for PCI buses,
 * enumerate the buses, and configure any attached devices.
 */

#define DEBUG_PCI_CFG	0

#if DEBUG_PCI_CFG
#define TRACE_CFG_WR(size, val, bus, dev, func, offset) \
	pr_info("CFG WR %d-byte VAL %#x to bus %d dev %d func %d addr %u\n", \
		size, val, bus, dev, func, offset & 0xFFF);
#define TRACE_CFG_RD(size, val, bus, dev, func, offset) \
	pr_info("CFG RD %d-byte VAL %#x from bus %d dev %d func %d addr %u\n", \
		size, val, bus, dev, func, offset & 0xFFF);
#else
#define TRACE_CFG_WR(...)
#define TRACE_CFG_RD(...)
#endif

static int pci_probe = 1;

/* Information on the PCIe RC ports configuration. */
static int pcie_rc[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];

/*
 * On some platforms with one or more Gx endpoint ports, we need to
 * delay the PCIe RC port probe for a few seconds to work around
 * a HW PCIe link-training bug. The exact delay is specified with
 * a kernel boot argument in the form of "pcie_rc_delay=T,P,S",
 * where T is the TRIO instance number, P is the port number and S is
 * the delay in seconds. If the argument is specified, but the delay is
 * not provided, the value will be DEFAULT_RC_DELAY.
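 * For example, "pcie_rc_delay=0,2,15" delays the probe of port 2 on
 * TRIO 0 by 15 seconds.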
 */
static int rc_delay[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];

/* Default number of seconds that the PCIe RC port probe can be delayed. */
#define DEFAULT_RC_DELAY	10

/* The PCI I/O space size in each PCI domain. */
#define IO_SPACE_SIZE		0x10000

/* Provide shorter versions of some very long constant names. */
#define AUTO_CONFIG_RC	\
	TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC
#define AUTO_CONFIG_RC_G1	\
	TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1
#define AUTO_CONFIG_EP	\
	TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT
#define AUTO_CONFIG_EP_G1	\
	TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1

/* Array of the PCIe ports configuration info obtained from the BIB. */
struct pcie_trio_ports_property pcie_ports[TILEGX_NUM_TRIO];

/* Number of configured TRIO instances. */
int num_trio_shims;

/* All drivers share the TRIO contexts defined here. */
gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];

/* Pointer to an array of PCIe RC controllers. */
struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
int num_rc_controllers;

static struct pci_ops tile_cfg_ops;

/* Mask of CPUs that should receive PCIe interrupts. */
static struct cpumask intr_cpus_map;

/*
 * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #.
 * For now, we simply send interrupts to non-dataplane CPUs.
 * We may implement methods to allow the user to specify the target CPUs,
 * e.g. via boot arguments.
 */
static int tile_irq_cpu(int irq)
{
	unsigned int count;
	int i = 0;
	int cpu;

	count = cpumask_weight(&intr_cpus_map);
	if (unlikely(count == 0)) {
		pr_warn("intr_cpus_map empty, interrupts will be delivered to dataplane tiles\n");
		return irq % (smp_height * smp_width);
	}

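	/* Round-robin by IRQ number: pick the (irq % count)-th CPU in the mask. */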
	count = irq % count;
	for_each_cpu(cpu, &intr_cpus_map) {
		if (i++ == count)
			break;
	}
	return cpu;
}

/* Open a file descriptor to the TRIO shim. */
static int tile_pcie_open(int trio_index)
{
	gxio_trio_context_t *context = &trio_contexts[trio_index];
	int ret;
	int mac;

	/* This opens a file descriptor to the TRIO shim. */
	ret = gxio_trio_init(context, trio_index);
	if (ret < 0)
		goto gxio_trio_init_failure;

	/* Allocate an ASID for the kernel. */
	ret = gxio_trio_alloc_asids(context, 1, 0, 0);
	if (ret < 0) {
		pr_err("PCI: ASID alloc failure on TRIO %d, give up\n",
			trio_index);
		goto asid_alloc_failure;
	}

	context->asid = ret;

#ifdef USE_SHARED_PCIE_CONFIG_REGION
	/*
	 * Alloc a PIO region for config access, shared by all MACs per TRIO.
	 * This shouldn't fail since the kernel is supposed to be the first
	 * client of the TRIO's PIO regions.
	 */
	ret = gxio_trio_alloc_pio_regions(context, 1, 0, 0);
	if (ret < 0) {
		pr_err("PCI: CFG PIO alloc failure on TRIO %d, give up\n",
			trio_index);
		goto pio_alloc_failure;
	}

	context->pio_cfg_index = ret;

	/*
	 * For PIO CFG, the bus_address_hi parameter is 0. The mac parameter
	 * is also 0 because it is specified in PIO_REGION_SETUP_CFG_ADDR.
	 */
	ret = gxio_trio_init_pio_region_aux(context, context->pio_cfg_index,
		0, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
	if (ret < 0) {
		pr_err("PCI: CFG PIO init failure on TRIO %d, give up\n",
			trio_index);
		goto pio_alloc_failure;
	}
#endif

	/* Get the properties of the PCIe ports on this TRIO instance. */
	ret = gxio_trio_get_port_property(context, &pcie_ports[trio_index]);
	if (ret < 0) {
		pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d, on TRIO %d\n",
		       ret, trio_index);
		goto get_port_property_failure;
	}

	context->mmio_base_mac =
		iorpc_ioremap(context->fd, 0, HV_TRIO_CONFIG_IOREMAP_SIZE);
	if (context->mmio_base_mac == NULL) {
		pr_err("PCI: TRIO config space mapping failure, error %d, on TRIO %d\n",
		       ret, trio_index);
		ret = -ENOMEM;

		goto trio_mmio_mapping_failure;
	}

	/* Check the port strap state which will override the BIB setting. */
	for (mac = 0; mac < TILEGX_TRIO_PCIES; mac++) {
		TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
		unsigned int reg_offset;

		/* Ignore ports that are not specified in the BIB. */
		if (!pcie_ports[trio_index].ports[mac].allow_rc &&
		    !pcie_ports[trio_index].ports[mac].allow_ep)
			continue;

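		/*
		 * Compose the config-region address of this MAC's
		 * PORT_CONFIG register: the register offset, the interface
		 * type and the MAC index each occupy their own bit field.
		 */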
		reg_offset =
			(TRIO_PCIE_INTFC_PORT_CONFIG <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		port_config.word =
			__gxio_mmio_read(context->mmio_base_mac + reg_offset);

		if (port_config.strap_state != AUTO_CONFIG_RC &&
		    port_config.strap_state != AUTO_CONFIG_RC_G1) {
			/*
			 * If this is really intended to be an EP port, record
			 * it so that the endpoint driver will know about it.
			 */
			if (port_config.strap_state == AUTO_CONFIG_EP ||
			    port_config.strap_state == AUTO_CONFIG_EP_G1)
				pcie_ports[trio_index].ports[mac].allow_ep = 1;
		}
	}

	return ret;

trio_mmio_mapping_failure:
get_port_property_failure:
asid_alloc_failure:
#ifdef USE_SHARED_PCIE_CONFIG_REGION
pio_alloc_failure:
#endif
	hv_dev_close(context->fd);
gxio_trio_init_failure:
	context->fd = -1;

	return ret;
}

static int __init tile_trio_init(void)
{
	int i;

	/* We loop over all the TRIO shims. */
	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
		if (tile_pcie_open(i) < 0)
			continue;
		num_trio_shims++;
	}

	return 0;
}
postcore_initcall(tile_trio_init);

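/* Ack a legacy INTx interrupt by clearing its IPI event bit. */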
static void tilegx_legacy_irq_ack(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
}

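/* Mask a legacy INTx interrupt by setting its IPI mask bit. */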
static void tilegx_legacy_irq_mask(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
}

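/* Unmask a legacy INTx interrupt by clearing its IPI mask bit. */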
static void tilegx_legacy_irq_unmask(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
}

static struct irq_chip tilegx_legacy_irq_chip = {
	.name			= "tilegx_legacy_irq",
	.irq_ack		= tilegx_legacy_irq_ack,
	.irq_mask		= tilegx_legacy_irq_mask,
	.irq_unmask		= tilegx_legacy_irq_unmask,

	/* TBD: support set_affinity. */
};

/*
 * This is a wrapper function of the kernel level-trigger interrupt
 * handler handle_level_irq() for PCI legacy interrupts. The TRIO
 * is configured such that only INTx Assert interrupts are proxied
 * to Linux which just calls handle_level_irq() after clearing the
 * MAC INTx Assert status bit associated with this interrupt.
 */
static void trio_handle_level_irq(struct irq_desc *desc)
{
	struct pci_controller *controller = irq_desc_get_handler_data(desc);
	gxio_trio_context_t *trio_context = controller->trio;
	uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc);
	int mac = controller->mac;
	unsigned int reg_offset;
	uint64_t level_mask;

	handle_level_irq(desc);

	/*
	 * Clear the INTx Level status, otherwise future interrupts are
	 * not sent.
	 */
	reg_offset = (TRIO_PCIE_INTFC_MAC_INT_STS <<
		TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
		TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	level_mask = TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK << intx;

	__gxio_mmio_write(trio_context->mmio_base_mac + reg_offset, level_mask);
}

/*
 * Create kernel irqs and set up the handlers for the legacy interrupts.
 * Also some minimum initialization for the MSI support.
 */
static int tile_init_irqs(struct pci_controller *controller)
{
	int i;
	int j;
	int irq;
	int result;

	cpumask_copy(&intr_cpus_map, cpu_online_mask);

	for (i = 0; i < 4; i++) {
		gxio_trio_context_t *context = controller->trio;
		int cpu;

		/* Ask the kernel to allocate an IRQ. */
		irq = irq_alloc_hwirq(-1);
		if (!irq) {
			pr_err("PCI: no free irq vectors, failed for %d\n", i);
			goto free_irqs;
		}
		controller->irq_intx_table[i] = irq;

		/* Distribute the 4 IRQs to different tiles. */
		cpu = tile_irq_cpu(irq);

		/* Configure the TRIO intr binding for this IRQ. */
		result = gxio_trio_config_legacy_intr(context, cpu_x(cpu),
						      cpu_y(cpu), KERNEL_PL,
						      irq, controller->mac, i);
		if (result < 0) {
			pr_err("PCI: MAC intx config failed for %d\n", i);

			goto free_irqs;
		}

		/* Register the IRQ handler with the kernel. */
		irq_set_chip_and_handler(irq, &tilegx_legacy_irq_chip,
					trio_handle_level_irq);
		irq_set_chip_data(irq, (void *)(uint64_t)i);
		irq_set_handler_data(irq, controller);
	}

	return 0;

free_irqs:
	for (j = 0; j < i; j++)
		irq_free_hwirq(controller->irq_intx_table[j]);

	return -1;
}

/*
 * Return 1 if the port is strapped to operate in RC mode.
 */
static int
strapped_for_rc(gxio_trio_context_t *trio_context, int mac)
{
	TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
	unsigned int reg_offset;

	/* Check the port configuration. */
	reg_offset =
		(TRIO_PCIE_INTFC_PORT_CONFIG <<
			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
	port_config.word =
		__gxio_mmio_read(trio_context->mmio_base_mac + reg_offset);

	if (port_config.strap_state == AUTO_CONFIG_RC ||
	    port_config.strap_state == AUTO_CONFIG_RC_G1)
		return 1;
	else
		return 0;
}

/*
 * Find valid controllers and fill in pci_controller structs for each
 * of them.
 *
 * Return the number of controllers discovered.
 */
int __init tile_pci_init(void)
{
	int ctl_index = 0;
	int i, j;

	if (!pci_probe) {
		pr_info("PCI: disabled by boot argument\n");
		return 0;
	}

	pr_info("PCI: Searching for controllers...\n");

	if (num_trio_shims == 0 || sim_is_simulator())
		return 0;

	/*
	 * Now determine which PCIe ports are configured to operate in RC
	 * mode. There is a difference in the port configuration capability
	 * between the Gx36 and Gx72 devices.
	 *
	 * The Gx36 has configuration capability for each of the 3 PCIe
	 * interfaces (disable, auto endpoint, auto RC, etc.).
	 * On the Gx72, you can only select one of the 3 PCIe interfaces per
	 * TRIO to train automatically. Further, the allowable training modes
	 * are reduced to four options (auto endpoint, auto RC, stream x1,
	 * stream x4).
	 *
	 * For Gx36 ports, it must be allowed to be in RC mode by the
	 * Board Information Block, and the hardware strapping pins must be
	 * set to RC mode.
	 *
	 * For Gx72 ports, the port will operate in RC mode if either of the
	 * following is true:
	 * 1. It is allowed to be in RC mode by the Board Information Block,
	 *    and the BIB doesn't allow the EP mode.
	 * 2. It is allowed to be in either the RC or the EP mode by the BIB,
	 *    and the hardware strapping pin is set to RC mode.
	 */
	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
		gxio_trio_context_t *context = &trio_contexts[i];

		if (context->fd < 0)
			continue;

		for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
			int is_rc = 0;

			if (pcie_ports[i].is_gx72 &&
			    pcie_ports[i].ports[j].allow_rc) {
				if (!pcie_ports[i].ports[j].allow_ep ||
				    strapped_for_rc(context, j))
					is_rc = 1;
			} else if (pcie_ports[i].ports[j].allow_rc &&
				   strapped_for_rc(context, j)) {
				is_rc = 1;
			}
			if (is_rc) {
				pcie_rc[i][j] = 1;
				num_rc_controllers++;
			}
		}
	}

	/* Return if no PCIe ports are configured to operate in RC mode. */
	if (num_rc_controllers == 0)
		return 0;

	/* Set the TRIO pointer and MAC index for each PCIe RC port. */
	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
		for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
			if (pcie_rc[i][j]) {
				pci_controllers[ctl_index].trio =
					&trio_contexts[i];
				pci_controllers[ctl_index].mac = j;
				pci_controllers[ctl_index].trio_index = i;
				ctl_index++;
				if (ctl_index == num_rc_controllers)
					goto out;
			}
		}
	}

out:
	/* Configure each PCIe RC port. */
	for (i = 0; i < num_rc_controllers; i++) {

		/* Configure the PCIe MAC to run in RC mode. */
		struct pci_controller *controller = &pci_controllers[i];

		controller->index = i;
		controller->ops = &tile_cfg_ops;

		controller->io_space.start = PCIBIOS_MIN_IO +
			(i * IO_SPACE_SIZE);
		controller->io_space.end = controller->io_space.start +
			IO_SPACE_SIZE - 1;
		BUG_ON(controller->io_space.end > IO_SPACE_LIMIT);
		controller->io_space.flags = IORESOURCE_IO;
		snprintf(controller->io_space_name,
			 sizeof(controller->io_space_name),
			 "PCI I/O domain %d", i);
		controller->io_space.name = controller->io_space_name;

		/*
		 * The PCI memory resource is located above the PA space.
		 * For every host bridge, the BAR window or the MMIO aperture
		 * is in range [3GB, 4GB - 1] of a 4GB space beyond the
		 * PA space.
		 */
		controller->mem_offset = TILE_PCI_MEM_START +
			(i * TILE_PCI_BAR_WINDOW_TOP);
		controller->mem_space.start = controller->mem_offset +
			TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE;
		controller->mem_space.end = controller->mem_offset +
			TILE_PCI_BAR_WINDOW_TOP - 1;
		controller->mem_space.flags = IORESOURCE_MEM;
		snprintf(controller->mem_space_name,
			 sizeof(controller->mem_space_name),
			 "PCI mem domain %d", i);
		controller->mem_space.name = controller->mem_space_name;
	}

	return num_rc_controllers;
}

/*
 * (pin - 1) converts from the PCI standard's [1:4] convention to
 * a normal [0:3] range.
 */
static int tile_map_irq(const struct pci_dev *dev, u8 device, u8 pin)
{
	struct pci_controller *controller =
		(struct pci_controller *)dev->sysdata;
	return controller->irq_intx_table[pin - 1];
}

static void fixup_read_and_payload_sizes(struct pci_controller *controller)
{
	gxio_trio_context_t *trio_context = controller->trio;
	struct pci_bus *root_bus = controller->root_bus;
	TRIO_PCIE_RC_DEVICE_CONTROL_t dev_control;
	TRIO_PCIE_RC_DEVICE_CAP_t rc_dev_cap;
	unsigned int reg_offset;
	struct pci_bus *child;
	int mac;
	int err;

	mac = controller->mac;

	/* Set our max read request size to be 4KB. */
	reg_offset =
		(TRIO_PCIE_RC_DEVICE_CONTROL <<
			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
					      reg_offset);
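	/* MRRS is encoded as a power of 2: 5 => 2^(5 + 7) = 4096 bytes. */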
	dev_control.max_read_req_sz = 5;
	__gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
			    dev_control.word);

	/*
	 * Set the max payload size supported by this Gx PCIe MAC.
	 * Though Gx PCIe supports Max Payload Size of up to 1024 bytes,
	 * experiments have shown that setting MPS to 256 yields the
	 * best performance.
	 */
	reg_offset =
		(TRIO_PCIE_RC_DEVICE_CAP <<
			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	rc_dev_cap.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
					     reg_offset);
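	/* Encoded as a power of 2: 1 => 2^(1 + 7) = 256 bytes. */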
	rc_dev_cap.mps_sup = 1;
	__gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
			    rc_dev_cap.word);

	/* Configure PCI Express MPS setting. */
	list_for_each_entry(child, &root_bus->children, node)
		pcie_bus_configure_settings(child);

	/*
	 * Set the mac_config register in TRIO based on the MPS/MRS of the link.
	 */
	reg_offset =
		(TRIO_PCIE_RC_DEVICE_CONTROL <<
			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
						reg_offset);

	err = gxio_trio_set_mps_mrs(trio_context,
				    dev_control.max_payload_size,
				    dev_control.max_read_req_sz,
				    mac);
	if (err < 0) {
		pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, MAC %d on TRIO %d\n",
		       mac, controller->trio_index);
	}
}

static int setup_pcie_rc_delay(char *str)
{
	unsigned long delay = 0;
	unsigned long trio_index;
	unsigned long mac;

	if (str == NULL || !isdigit(*str))
		return -EINVAL;
	trio_index = simple_strtoul(str, (char **)&str, 10);
	if (trio_index >= TILEGX_NUM_TRIO)
		return -EINVAL;

	if (*str != ',')
		return -EINVAL;

	str++;
	if (!isdigit(*str))
		return -EINVAL;
	mac = simple_strtoul(str, (char **)&str, 10);
	if (mac >= TILEGX_TRIO_PCIES)
		return -EINVAL;

	if (*str != '\0') {
		if (*str != ',')
			return -EINVAL;

		str++;
		if (!isdigit(*str))
			return -EINVAL;
		delay = simple_strtoul(str, (char **)&str, 10);
	}

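	/* GNU "?:" shorthand: use DEFAULT_RC_DELAY if no delay was given. */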
	rc_delay[trio_index][mac] = delay ? : DEFAULT_RC_DELAY;
	return 0;
}
early_param("pcie_rc_delay", setup_pcie_rc_delay);

/* PCI initialization entry point, called by subsys_initcall. */
int __init pcibios_init(void)
{
	resource_size_t offset;
	LIST_HEAD(resources);
	int next_busno;
	struct pci_host_bridge *bridge;
	int i;

	tile_pci_init();

	if (num_rc_controllers == 0)
		return 0;

	/*
	 * Delay a bit in case devices aren't ready.  Some devices are
	 * known to require at least 20ms here, but we use a more
	 * conservative value.
	 */
	msleep(250);

	/* Scan all of the recorded PCI controllers.  */
	for (next_busno = 0, i = 0; i < num_rc_controllers; i++) {
		struct pci_controller *controller = &pci_controllers[i];
		gxio_trio_context_t *trio_context = controller->trio;
		TRIO_PCIE_INTFC_PORT_STATUS_t port_status;
		TRIO_PCIE_INTFC_TX_FIFO_CTL_t tx_fifo_ctl;
		struct pci_bus *bus;
		unsigned int reg_offset;
		unsigned int class_code_revision;
		int trio_index;
		int mac;
		int ret;

		if (trio_context->fd < 0)
			continue;

		trio_index = controller->trio_index;
		mac = controller->mac;

		/*
		 * Check for PCIe link-up status to decide if we need
		 * to force the link to come up.
		 */
		reg_offset =
			(TRIO_PCIE_INTFC_PORT_STATUS <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		port_status.word =
			__gxio_mmio_read(trio_context->mmio_base_mac +
					 reg_offset);
		if (!port_status.dl_up) {
			if (rc_delay[trio_index][mac]) {
				pr_info("Delaying PCIe RC TRIO init %d sec on MAC %d on TRIO %d\n",
					rc_delay[trio_index][mac], mac,
					trio_index);
				msleep(rc_delay[trio_index][mac] * 1000);
			}
			ret = gxio_trio_force_rc_link_up(trio_context, mac);
			if (ret < 0)
				pr_err("PCI: PCIE_FORCE_LINK_UP failure, MAC %d on TRIO %d\n",
				       mac, trio_index);
		}

		pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n",
			i, trio_index, controller->mac);

		/* Delay the bus probe if needed. */
		if (rc_delay[trio_index][mac]) {
			pr_info("Delaying PCIe RC bus enumerating %d sec on MAC %d on TRIO %d\n",
				rc_delay[trio_index][mac], mac, trio_index);
			msleep(rc_delay[trio_index][mac] * 1000);
		} else {
			/*
			 * Wait a bit here because some EP devices
			 * take longer to come up.
			 */
			msleep(1000);
		}

		/* Check for PCIe link-up status again. */
		port_status.word =
			__gxio_mmio_read(trio_context->mmio_base_mac +
					 reg_offset);
		if (!port_status.dl_up) {
			if (pcie_ports[trio_index].ports[mac].removable) {
				pr_info("PCI: link is down, MAC %d on TRIO %d\n",
					mac, trio_index);
				pr_info("This is expected if no PCIe card is connected to this link\n");
			} else
				pr_err("PCI: link is down, MAC %d on TRIO %d\n",
				       mac, trio_index);
			continue;
		}

		/*
		 * Ensure that the link can come out of L1 power down state.
		 * Strictly speaking, this is needed only in the case of
		 * heavy RC-initiated DMAs.
		 */
		reg_offset =
			(TRIO_PCIE_INTFC_TX_FIFO_CTL <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
		tx_fifo_ctl.word =
			__gxio_mmio_read(trio_context->mmio_base_mac +
					 reg_offset);
		tx_fifo_ctl.min_p_credits = 0;
		__gxio_mmio_write(trio_context->mmio_base_mac + reg_offset,
				  tx_fifo_ctl.word);

		/*
		 * Change the device ID so that Linux bus crawl doesn't confuse
		 * the internal bridge with any Tilera endpoints.
		 */
		reg_offset =
			(TRIO_PCIE_RC_DEVICE_ID_VEN_ID <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		__gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
				    (TILERA_GX36_RC_DEV_ID <<
				    TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT) |
				    TILERA_VENDOR_ID);

		/* Set the internal P2P bridge class code. */
		reg_offset =
			(TRIO_PCIE_RC_REVISION_ID <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		class_code_revision =
			__gxio_mmio_read32(trio_context->mmio_base_mac +
					   reg_offset);
		class_code_revision = (class_code_revision & 0xff) |
			(PCI_CLASS_BRIDGE_PCI << 16);

		__gxio_mmio_write32(trio_context->mmio_base_mac +
				    reg_offset, class_code_revision);

#ifdef USE_SHARED_PCIE_CONFIG_REGION

		/* Map in the MMIO space for the PIO region. */
		offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index) |
			(((unsigned long long)mac) <<
			TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);

#else

		/* Alloc a PIO region for PCI config access per MAC. */
		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
		if (ret < 0) {
			pr_err("PCI: PCI CFG PIO alloc failure for mac %d on TRIO %d, give up\n",
			       mac, trio_index);

			continue;
		}

		trio_context->pio_cfg_index[mac] = ret;

		/* For PIO CFG, the bus_address_hi parameter is 0. */
		ret = gxio_trio_init_pio_region_aux(trio_context,
			trio_context->pio_cfg_index[mac],
			mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
		if (ret < 0) {
			pr_err("PCI: PCI CFG PIO init failure for mac %d on TRIO %d, give up\n",
			       mac, trio_index);

			continue;
		}

		offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index[mac]) |
			(((unsigned long long)mac) <<
			TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);

#endif

		/*
		 * To save VMALLOC space, we take advantage of the fact that
		 * bit 29 in the PIO CFG address format is reserved 0. With
		 * TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT being 30,
		 * this cuts VMALLOC space usage from 1GB to 512MB per mac.
		 */
		trio_context->mmio_base_pio_cfg[mac] =
			iorpc_ioremap(trio_context->fd, offset, (1UL <<
			(TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT - 1)));
		if (trio_context->mmio_base_pio_cfg[mac] == NULL) {
			pr_err("PCI: PIO map failure for mac %d on TRIO %d\n",
			       mac, trio_index);

			continue;
		}

		/* Initialize the PCIe interrupts. */
		if (tile_init_irqs(controller)) {
			pr_err("PCI: IRQs init failure for mac %d on TRIO %d\n",
				mac, trio_index);

			continue;
		}

		/*
		 * The PCI memory resource is located above the PA space.
		 * The memory range for the PCI root bus should not overlap
		 * with the physical RAM.
		 */
		pci_add_resource_offset(&resources, &controller->mem_space,
					controller->mem_offset);
		pci_add_resource(&resources, &controller->io_space);
		controller->first_busno = next_busno;

		bridge = pci_alloc_host_bridge(0);
		if (!bridge)
			break;

		list_splice_init(&resources, &bridge->windows);
		bridge->dev.parent = NULL;
		bridge->sysdata = controller;
		bridge->busnr = next_busno;
		bridge->ops = controller->ops;
		bridge->swizzle_irq = pci_common_swizzle;
		bridge->map_irq = tile_map_irq;

		pci_scan_root_bus_bridge(bridge);
		bus = bridge->bus;
		controller->root_bus = bus;
		next_busno = bus->busn_res.end + 1;
	}

	/*
	 * This comes from the generic Linux PCI driver.
	 *
	 * It allocates all of the resources (I/O memory, etc)
	 * associated with the devices read in above.
	 */
	pci_assign_unassigned_resources();

	/* Record the I/O resources in the PCI controller structure. */
	for (i = 0; i < num_rc_controllers; i++) {
		struct pci_controller *controller = &pci_controllers[i];
		gxio_trio_context_t *trio_context = controller->trio;
		struct pci_bus *root_bus = pci_controllers[i].root_bus;
		int ret;
		int j;

		/*
		 * Skip controllers that are not properly initialized or
		 * have down links.
		 */
		if (root_bus == NULL)
			continue;

		/* Configure the max_payload_size values for this domain. */
		fixup_read_and_payload_sizes(controller);

		/* Alloc a PIO region for PCI memory access for each RC port. */
		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
		if (ret < 0) {
			pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, give up\n",
			       controller->trio_index, controller->mac);

			continue;
		}

		controller->pio_mem_index = ret;

		/*
		 * For PIO MEM, the bus_address_hi parameter is hard-coded 0
		 * because we always assign 32-bit PCI bus BAR ranges.
		 */
		ret = gxio_trio_init_pio_region_aux(trio_context,
						    controller->pio_mem_index,
						    controller->mac,
						    0,
						    0);
		if (ret < 0) {
			pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, give up\n",
			       controller->trio_index, controller->mac);

			continue;
		}

#ifdef CONFIG_TILE_PCI_IO
		/*
		 * Alloc a PIO region for PCI I/O space access for each RC port.
		 */
		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
		if (ret < 0) {
			pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, give up\n",
			       controller->trio_index, controller->mac);

			continue;
		}

		controller->pio_io_index = ret;

		/*
		 * For PIO IO, the bus_address_hi parameter is hard-coded 0
		 * because PCI I/O address space is 32-bit.
		 */
		ret = gxio_trio_init_pio_region_aux(trio_context,
						    controller->pio_io_index,
						    controller->mac,
						    0,
						    HV_TRIO_PIO_FLAG_IO_SPACE);
		if (ret < 0) {
			pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, give up\n",
			       controller->trio_index, controller->mac);

			continue;
		}
#endif

		/*
		 * Configure a Mem-Map region for each memory controller so
		 * that Linux can map all of its PA space to the PCI bus.
		 * Use the IOMMU to handle hash-for-home memory.
		 */
		for_each_online_node(j) {
			unsigned long start_pfn = node_start_pfn[j];
			unsigned long end_pfn = node_end_pfn[j];
			unsigned long nr_pages = end_pfn - start_pfn;

			ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0,
							  0);
			if (ret < 0) {
				pr_err("PCI: Mem-Map alloc failure on TRIO %d mac %d for MC %d, give up\n",
				       controller->trio_index, controller->mac,
				       j);

				goto alloc_mem_map_failed;
			}

			controller->mem_maps[j] = ret;

			/*
			 * Initialize the Mem-Map and the I/O MMU so that all
			 * the physical memory can be accessed by the endpoint
			 * devices. The base bus address is set to the base CPA
			 * of this memory controller plus an offset (see pci.h).
			 * The region's base VA is set to the base CPA. The
			 * I/O MMU table essentially translates the CPA to
			 * the real PA. Implicitly, for node 0, we create
			 * a separate Mem-Map region that serves as the inbound
			 * window for legacy 32-bit devices. This is a direct
			 * map of the low 4GB CPA space.
			 */
			ret = gxio_trio_init_memory_map_mmu_aux(trio_context,
				controller->mem_maps[j],
				start_pfn << PAGE_SHIFT,
				nr_pages << PAGE_SHIFT,
				trio_context->asid,
				controller->mac,
				(start_pfn << PAGE_SHIFT) +
				TILE_PCI_MEM_MAP_BASE_OFFSET,
				j,
				GXIO_TRIO_ORDER_MODE_UNORDERED);
			if (ret < 0) {
				pr_err("PCI: Mem-Map init failure on TRIO %d mac %d for MC %d, give up\n",
				       controller->trio_index, controller->mac,
				       j);

				goto alloc_mem_map_failed;
			}
			continue;

alloc_mem_map_failed:
			break;
		}

		pci_bus_add_devices(root_bus);
	}

	return 0;
}
subsys_initcall(pcibios_init);

/* Process any "pci=" kernel boot arguments. */
char *__init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		pci_probe = 0;
		return NULL;
	}
	return str;
}

/*
 * Called for each device after PCI setup is done.
 * We initialize the PCI device capabilities conservatively, assuming that
 * all devices can only address the 32-bit DMA space. The exception here is
 * that the device dma_offset is set to the value that matches the 64-bit
 * capable devices. This is OK because dma_offset is not used by legacy
 * dma_ops, nor by the hybrid dma_ops's streaming DMAs, which are 64-bit ops.
 * This implementation matches the kernel design of setting PCI devices'
 * coherent_dma_mask to 0xffffffffull by default, allowing the device drivers
 * to skip calling pci_set_consistent_dma_mask(DMA_BIT_MASK(32)).
 */
static void pcibios_fixup_final(struct pci_dev *pdev)
{
	set_dma_ops(&pdev->dev, gx_legacy_pci_dma_map_ops);
	set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET);
	pdev->dev.archdata.max_direct_dma_addr =
		TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
	pdev->dev.coherent_dma_mask = TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);

/* Map a PCI MMIO bus address into VA space. */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	struct pci_controller *controller = NULL;
	resource_size_t bar_start;
	resource_size_t bar_end;
	resource_size_t offset;
	resource_size_t start;
	resource_size_t end;
	int trio_fd;
	int i;

	start = phys_addr;
	end = phys_addr + size - 1;

	/*
	 * By searching phys_addr in each controller's mem_space, we can
	 * determine the controller that should accept the PCI memory access.
	 */
	for (i = 0; i < num_rc_controllers; i++) {
		/*
		 * Skip controllers that are not properly initialized or
		 * have down links.
		 */
		if (pci_controllers[i].root_bus == NULL)
			continue;

		bar_start = pci_controllers[i].mem_space.start;
		bar_end = pci_controllers[i].mem_space.end;

		if ((start >= bar_start) && (end <= bar_end)) {
			controller = &pci_controllers[i];
			break;
		}
	}

	if (controller == NULL)
		return NULL;

	trio_fd = controller->trio->fd;

	/* Convert the resource start to the bus address offset. */
	start = phys_addr - controller->mem_offset;

	offset = HV_TRIO_PIO_OFFSET(controller->pio_mem_index) + start;

	/* We need to keep the PCI bus address's in-page offset in the VA. */
	return iorpc_ioremap(trio_fd, offset, size) +
		(start & (PAGE_SIZE - 1));
}
EXPORT_SYMBOL(ioremap);

#ifdef CONFIG_TILE_PCI_IO
/* Map a PCI I/O address into VA space. */
void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	struct pci_controller *controller = NULL;
	resource_size_t bar_start;
	resource_size_t bar_end;
	resource_size_t offset;
	resource_size_t start;
	resource_size_t end;
	int trio_fd;
	int i;

	start = port;
	end = port + size - 1;

	/*
	 * By searching the port in each controller's io_space, we can
	 * determine the controller that should accept the PCI I/O access.
	 */
	for (i = 0; i < num_rc_controllers; i++) {
		/*
		 * Skip controllers that are not properly initialized or
		 * have down links.
		 */
		if (pci_controllers[i].root_bus == NULL)
			continue;

		bar_start = pci_controllers[i].io_space.start;
		bar_end = pci_controllers[i].io_space.end;

		if ((start >= bar_start) && (end <= bar_end)) {
			controller = &pci_controllers[i];
			break;
		}
	}

	if (controller == NULL)
		return NULL;

	trio_fd = controller->trio->fd;

	/* Convert the resource start to the bus address offset. */
	port -= controller->io_space.start;

	offset = HV_TRIO_PIO_OFFSET(controller->pio_io_index) + port;

	/* We need to keep the PCI bus address's in-page offset in the VA. */
	return iorpc_ioremap(trio_fd, offset, size) + (port & (PAGE_SIZE - 1));
}
EXPORT_SYMBOL(ioport_map);

void ioport_unmap(void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL(ioport_unmap);
#endif

void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL(pci_iounmap);

/****************************************************************
 *
 * Tile PCI config space read/write routines
 *
 ****************************************************************/

/*
 * These are the normal read and write ops.
 * These are expanded with macros from pci_bus_read_config_byte() etc.
 *
 * devfn is the combined PCI device & function.
 *
 * offset is in bytes, from the start of config space for the
 * specified bus & device.
 */
static int tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset,
			 int size, u32 *val)
{
	struct pci_controller *controller = bus->sysdata;
	gxio_trio_context_t *trio_context = controller->trio;
	int busnum = bus->number & 0xff;
	int device = PCI_SLOT(devfn);
	int function = PCI_FUNC(devfn);
	int config_type = 1;
	TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
	void *mmio_addr;

	/*
	 * Map all accesses to the local device on the root bus into the
	 * MMIO space of the MAC. Accesses to the downstream devices
	 * go to the PIO space.
	 */
	if (pci_is_root_bus(bus)) {
		if (device == 0) {
			/*
			 * This is the internal downstream P2P bridge,
			 * access directly.
			 */
			unsigned int reg_offset;

			reg_offset = ((offset & 0xFFF) <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
				(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
				<< TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
				(controller->mac <<
					TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

			mmio_addr = trio_context->mmio_base_mac + reg_offset;

			goto valid_device;

		} else {
			/*
			 * We fake an empty device for (device > 0),
			 * since there is only one device on bus 0.
			 */
			goto invalid_device;
		}
	}

	/*
	 * Accesses to the directly attached device have to be
	 * sent as type-0 configs.
	 */
	if (busnum == (controller->first_busno + 1)) {
		/*
		 * There is only one device off of our built-in P2P bridge.
		 */
		if (device != 0)
			goto invalid_device;

		config_type = 0;
	}

	cfg_addr.word = 0;
	cfg_addr.reg_addr = (offset & 0xFFF);
	cfg_addr.fn = function;
	cfg_addr.dev = device;
	cfg_addr.bus = busnum;
	cfg_addr.type = config_type;

	/*
	 * Note that we don't set the mac field in cfg_addr because the
	 * mapping is per port.
	 */
	mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
		cfg_addr.word;

valid_device:

	switch (size) {
	case 4:
		*val = __gxio_mmio_read32(mmio_addr);
		break;

	case 2:
		*val = __gxio_mmio_read16(mmio_addr);
		break;

	case 1:
		*val = __gxio_mmio_read8(mmio_addr);
		break;

	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	TRACE_CFG_RD(size, *val, busnum, device, function, offset);

	return 0;

invalid_device:
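	/* Nonexistent devices read back as all ones, per PCI convention. */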

	switch (size) {
	case 4:
		*val = 0xFFFFFFFF;
		break;

	case 2:
		*val = 0xFFFF;
		break;

	case 1:
		*val = 0xFF;
		break;

	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return 0;
}


/*
 * See tile_cfg_read() for relevant comments.
 * Note that "val" is the value to write, not a pointer to that value.
 */
static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset,
			  int size, u32 val)
{
	struct pci_controller *controller = bus->sysdata;
	gxio_trio_context_t *trio_context = controller->trio;
	int busnum = bus->number & 0xff;
	int device = PCI_SLOT(devfn);
	int function = PCI_FUNC(devfn);
	int config_type = 1;
	TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
	void *mmio_addr;
	u32 val_32 = (u32)val;
	u16 val_16 = (u16)val;
	u8 val_8 = (u8)val;

	/*
	 * Map all accesses to the local device on the root bus into the
	 * MMIO space of the MAC. Accesses to the downstream devices
	 * go to the PIO space.
	 */
	if (pci_is_root_bus(bus)) {
		if (device == 0) {
			/*
			 * This is the internal downstream P2P bridge,
			 * access directly.
			 */
			unsigned int reg_offset;

			reg_offset = ((offset & 0xFFF) <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
				(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
				<< TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
				(controller->mac <<
					TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

			mmio_addr = trio_context->mmio_base_mac + reg_offset;

			goto valid_device;

		} else {
			/*
			 * We fake an empty device for (device > 0),
			 * since there is only one device on bus 0.
			 */
			goto invalid_device;
		}
	}

	/*
1378
	 * Accesses to the directly attached device have to be
	 * sent as type-0 configs.
	 */
	if (busnum == (controller->first_busno + 1)) {
		/*
		 * There is only one device off of our built-in P2P bridge.
		 */
		if (device != 0)
			goto invalid_device;

		config_type = 0;
	}

	cfg_addr.word = 0;
	cfg_addr.reg_addr = (offset & 0xFFF);
	cfg_addr.fn = function;
	cfg_addr.dev = device;
	cfg_addr.bus = busnum;
	cfg_addr.type = config_type;

	/*
	 * Note that we don't set the mac field in cfg_addr because the
	 * mapping is per port.
	 */
	mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
			cfg_addr.word;

valid_device:

	switch (size) {
	case 4:
		__gxio_mmio_write32(mmio_addr, val_32);
		TRACE_CFG_WR(size, val_32, busnum, device, function, offset);
		break;

	case 2:
		__gxio_mmio_write16(mmio_addr, val_16);
		TRACE_CFG_WR(size, val_16, busnum, device, function, offset);
		break;

	case 1:
		__gxio_mmio_write8(mmio_addr, val_8);
		TRACE_CFG_WR(size, val_8, busnum, device, function, offset);
		break;

	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

invalid_device:
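	/* Config writes to nonexistent devices are silently dropped. */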

	return 0;
}


static struct pci_ops tile_cfg_ops = {
	.read =         tile_cfg_read,
	.write =        tile_cfg_write,
};


/* MSI support starts here. */
static unsigned int tilegx_msi_startup(struct irq_data *d)
{
	if (irq_data_get_msi_desc(d))
		pci_msi_unmask_irq(d);

	return 0;
}

static void tilegx_msi_ack(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
}

static void tilegx_msi_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	__insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
}

static void tilegx_msi_unmask(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
	pci_msi_unmask_irq(d);
}

static struct irq_chip tilegx_msi_chip = {
	.name			= "tilegx_msi",
	.irq_startup		= tilegx_msi_startup,
	.irq_ack		= tilegx_msi_ack,
	.irq_mask		= tilegx_msi_mask,
	.irq_unmask		= tilegx_msi_unmask,

	/* TBD: support set_affinity. */
};

int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	struct pci_controller *controller;
	gxio_trio_context_t *trio_context;
	struct msi_msg msg;
	int default_irq;
	uint64_t mem_map_base;
	uint64_t mem_map_limit;
	u64 msi_addr;
	int mem_map;
	int cpu;
	int irq;
	int ret;

	irq = irq_alloc_hwirq(-1);
	if (!irq)
		return -ENOSPC;

	/*
	 * Since we use a 64-bit Mem-Map to accept the MSI write, we fail
	 * devices that are not capable of generating a 64-bit message address.
	 * These devices will fall back to using the legacy interrupts.
	 * Most PCIe endpoint devices do support 64-bit message addressing.
	 */
	if (desc->msi_attrib.is_64 == 0) {
		dev_info(&pdev->dev, "64-bit MSI message address not supported, falling back to legacy interrupts\n");

		ret = -ENOMEM;
		goto is_64_failure;
	}

	default_irq = desc->msi_attrib.default_irq;
	controller = irq_get_handler_data(default_irq);

	BUG_ON(!controller);

	trio_context = controller->trio;

	/*
	 * Allocate a scatter-queue that will accept the MSI write and
	 * trigger the TILE-side interrupts. We use the scatter-queue regions
	 * before the mem map regions, because the latter are needed by more
	 * applications.
	 */
	mem_map = gxio_trio_alloc_scatter_queues(trio_context, 1, 0, 0);
	if (mem_map >= 0) {
		TRIO_MAP_SQ_DOORBELL_FMT_t doorbell_template = {{
			.pop = 0,
			.doorbell = 1,
		}};

		mem_map += TRIO_NUM_MAP_MEM_REGIONS;
		mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
			mem_map * MEM_MAP_INTR_REGION_SIZE;
		mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;

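		/*
		 * The MSI write targets the doorbell, i.e. the last 8-byte
		 * word of the scatter-queue region.
		 */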
		msi_addr = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 8;
		msg.data = (unsigned int)doorbell_template.word;
	} else {
		/* SQ regions are out, allocate from map mem regions. */
		mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0);
		if (mem_map < 0) {
			dev_info(&pdev->dev, "%s Mem-Map alloc failure - failed to initialize MSI interrupts - falling back to legacy interrupts\n",
				 desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
			ret = -ENOMEM;
			goto msi_mem_map_alloc_failure;
		}

		mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
			mem_map * MEM_MAP_INTR_REGION_SIZE;
		mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;

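		/*
		 * The MSI write targets the Mem-Map region's INT3 interrupt
		 * register, at INT3's offset relative to INT0.
		 */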
		msi_addr = mem_map_base + TRIO_MAP_MEM_REG_INT3 -
			TRIO_MAP_MEM_REG_INT0;

		msg.data = mem_map;
	}

	/* We try to distribute different IRQs to different tiles. */
	cpu = tile_irq_cpu(irq);

	/*
	 * Now call up to the HV to configure the MSI interrupt and
	 * set up the IPI binding.
	 */
	ret = gxio_trio_config_msi_intr(trio_context, cpu_x(cpu), cpu_y(cpu),
					KERNEL_PL, irq, controller->mac,
					mem_map, mem_map_base, mem_map_limit,
					trio_context->asid);
	if (ret < 0) {
		dev_info(&pdev->dev, "HV MSI config failed\n");

		goto hv_msi_config_failure;
	}

	irq_set_msi_desc(irq, desc);

	msg.address_hi = msi_addr >> 32;
	msg.address_lo = msi_addr & 0xffffffff;

	pci_write_msi_msg(irq, &msg);
	irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq);
	irq_set_handler_data(irq, controller);

	return 0;

hv_msi_config_failure:
	/* Free mem-map */
msi_mem_map_alloc_failure:
is_64_failure:
	irq_free_hwirq(irq);
	return ret;
}

void arch_teardown_msi_irq(unsigned int irq)
{
	irq_free_hwirq(irq);
}