pci-xgene.c 16.2 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0+
2 3 4 5 6 7 8
/**
 * APM X-Gene PCIe Driver
 *
 * Copyright (c) 2014 Applied Micro Circuits Corporation.
 *
 * Author: Tanmay Inamdar <tinamdar@apm.com>.
 */
9
#include <linux/clk.h>
10 11 12 13
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
14
#include <linux/init.h>
15 16 17 18 19
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
20 21
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
22 23 24
#include <linux/platform_device.h>
#include <linux/slab.h>

25 26
#include "../pci.h"

27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55
/* Controller CSR register offsets (relative to csr_base) */
#define PCIECORE_CTLANDSTATUS		0x50
#define PIM1_1L				0x80
#define IBAR2				0x98
#define IR2MSK				0x9c
#define PIM2_1L				0xa0
#define IBAR3L				0xb4
#define IR3MSKL				0xbc
#define PIM3_1L				0xc4
#define OMR1BARL			0x100
#define OMR2BARL			0x118
#define OMR3BARL			0x130
#define CFGBARL				0x154
#define CFGBARH				0x158
#define CFGCTL				0x15c
#define RTDID				0x160
#define BRIDGE_CFG_0			0x2000
#define BRIDGE_CFG_4			0x2010
#define BRIDGE_STATUS_0			0x2600

/* Bit-field and magic values used with the registers above */
#define LINK_UP_MASK			0x00000100
#define AXI_EP_CFG_ACCESS		0x10000
#define EN_COHERENCY			0xF0000000
#define EN_REG				0x00000001
#define OB_LO_IO			0x00000002
#define XGENE_PCIE_VENDORID		0x10E8
#define XGENE_PCIE_DEVICEID		0xE004
#define SZ_1T				(SZ_1G*1024ULL)
/* Extract the PHY rate field, bits [15:14] of PCIECORE_CTLANDSTATUS */
#define PIPE_PHY_RATE_RD(src)		((0xc000 & (u32)(src)) >> 0xe)

/* Offset of the PCIe capability in the v1 RC's config space */
#define XGENE_V1_PCI_EXP_CAP		0x40

/* PCIe IP version */
#define XGENE_PCIE_IP_VER_UNKN		0
#define XGENE_PCIE_IP_VER_1		1
#define XGENE_PCIE_IP_VER_2		2

63
#if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
64 65 66 67 68 69 70 71
/* Per-controller state, shared by the DT and ACPI/ECAM probe paths */
struct xgene_pcie_port {
	struct device_node	*node;		/* OF node (DT path only) */
	struct device		*dev;
	struct clk		*clk;		/* port clock (DT path only) */
	void __iomem		*csr_base;	/* controller CSR registers */
	void __iomem		*cfg_base;	/* config-space window */
	unsigned long		cfg_addr;	/* bus address of cfg window */
	bool			link_up;
	u32			version;	/* XGENE_PCIE_IP_VER_* */
};

B
Bjorn Helgaas 已提交
75 76 77 78 79 80 81 82 83 84
/* Read a 32-bit controller CSR register */
static u32 xgene_pcie_readl(struct xgene_pcie_port *port, u32 reg)
{
	return readl(port->csr_base + reg);
}

/* Write a 32-bit controller CSR register */
static void xgene_pcie_writel(struct xgene_pcie_port *port, u32 reg, u32 val)
{
	writel(val, port->csr_base + reg);
}

85 86 87 88 89
/*
 * Build the low 32 bits of a BAR value: the memory-BAR address bits of
 * @addr merged with the BAR flag bits in @flags.
 */
static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
{
	u32 addr_bits = addr & PCI_BASE_ADDRESS_MEM_MASK;

	return flags | addr_bits;
}

90 91 92 93 94 95 96 97 98 99 100
/*
 * Recover the driver state for @bus.  On the DT path the port is stored
 * directly in bus->sysdata; on the ACPI path sysdata is the ECAM config
 * window and the port hangs off its ->priv pointer.
 */
static inline struct xgene_pcie_port *pcie_bus_to_port(struct pci_bus *bus)
{
	if (!acpi_disabled) {
		struct pci_config_window *cfg = bus->sysdata;

		return (struct xgene_pcie_port *)(cfg->priv);
	}

	return (struct xgene_pcie_port *)(bus->sysdata);
}

101 102 103 104 105 106
/*
 * When the address bit [17:16] is 2'b01, the Configuration access will be
 * treated as Type 1 and it will be forwarded to external PCIe device.
 */
static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
{
	struct xgene_pcie_port *port = pcie_bus_to_port(bus);
	void __iomem *base = port->cfg_base;

	/* Accesses beyond the root bus go through the Type 1 window */
	if (bus->number > bus->primary)
		base += AXI_EP_CFG_ACCESS;

	return base;
}

/*
 * For Configuration request, RTDID register is used as Bus Number,
 * Device Number and Function number of the header fields.
 */
static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
{
	struct xgene_pcie_port *port = pcie_bus_to_port(bus);
	unsigned int b, d, f;
	u32 rtdid_val = 0;

	b = bus->number;
	d = PCI_SLOT(devfn);
	f = PCI_FUNC(devfn);

	/* The root port itself is addressed with BDF 0 */
	if (!pci_is_root_bus(bus))
		rtdid_val = (b << 8) | (d << 3) | f;

	xgene_pcie_writel(port, RTDID, rtdid_val);
	/* read the register back to ensure flush */
	xgene_pcie_readl(port, RTDID);
}

/*
 * X-Gene PCIe port uses BAR0-BAR1 of RC's configuration space as
 * the translation from PCI bus to native BUS.  Entire DDR region
 * is mapped into PCIe space using these registers, so it can be
 * reached by DMA from EP devices.  The BAR0/1 of bridge should be
 * hidden during enumeration to avoid the sizing and resource allocation
 * by PCIe core.
 */
static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
{
	bool is_bar01 = (offset == PCI_BASE_ADDRESS_0) ||
			(offset == PCI_BASE_ADDRESS_1);

	return pci_is_root_bus(bus) && is_bar01;
}

154
/*
 * Map a config-space access to an MMIO address.  Returning NULL makes
 * the generic accessors fail the access.
 */
static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
					int offset)
{
	/* Only devfn 0 (the RC itself) exists on the root bus */
	if ((pci_is_root_bus(bus) && devfn != 0) ||
	    xgene_pcie_hide_rc_bars(bus, offset))
		return NULL;

	/* Route the upcoming access to the selected BDF */
	xgene_pcie_set_rtdid_reg(bus, devfn);
	return xgene_pcie_get_cfg_base(bus) + offset;
}

165 166 167
/*
 * Config read op.  The controller only supports aligned 32-bit config
 * reads, so narrower accesses are emulated by reading the containing
 * dword and extracting the requested bytes.
 */
static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 *val)
{
	struct xgene_pcie_port *port = pcie_bus_to_port(bus);

	if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) !=
	    PCIBIOS_SUCCESSFUL)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/*
	 * The v1 controller has a bug in its Configuration Request
	 * Retry Status (CRS) logic: when CRS is enabled and we read the
	 * Vendor and Device ID of a non-existent device, the controller
	 * fabricates return data of 0xFFFF0001 ("device exists but is not
	 * ready") instead of 0xFFFFFFFF ("device does not exist").  This
	 * causes the PCI core to retry the read until it times out.
	 * Avoid this by not claiming to support CRS.
	 */
	if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) &&
	    ((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL))
		*val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);

	/* Shift/mask out the bytes a 1- or 2-byte read asked for */
	if (size <= 2)
		*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

	return PCIBIOS_SUCCESSFUL;
}
192
#endif
193

194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
/*
 * Pull the first memory resource out of the ACPI device's _CRS; for
 * this controller that is the CSR register region.
 */
static int xgene_get_csr_resource(struct acpi_device *adev,
				  struct resource *res)
{
	struct device *dev = &adev->dev;
	struct resource_entry *entry;
	struct list_head list;
	unsigned long flags;
	int ret;

	INIT_LIST_HEAD(&list);
	flags = IORESOURCE_MEM;
	ret = acpi_dev_get_resources(adev, &list,
				     acpi_dev_filter_resource_type_cb,
				     (void *) flags);
	if (ret < 0) {
		dev_err(dev, "failed to parse _CRS method, error code %d\n",
			ret);
		return ret;
	}

	if (ret == 0) {
		dev_err(dev, "no IO and memory resources present in _CRS\n");
		return -EINVAL;
	}

	/* Copy out the first entry before freeing the whole list */
	entry = list_first_entry(&list, struct resource_entry, node);
	*res = *entry->res;
	acpi_dev_free_resource_list(&list);
	return 0;
}

/*
 * Common ACPI/ECAM init: allocate the port, map the CSR region from
 * _CRS and stash the port in cfg->priv for pcie_bus_to_port().
 */
static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion)
{
	struct device *dev = cfg->parent;
	struct acpi_device *adev = to_acpi_device(dev);
	struct xgene_pcie_port *port;
	struct resource csr;
	int ret;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	ret = xgene_get_csr_resource(adev, &csr);
	if (ret) {
		dev_err(dev, "can't get CSR resource\n");
		return ret;
	}
	port->csr_base = devm_pci_remap_cfg_resource(dev, &csr);
	if (IS_ERR(port->csr_base))
		return PTR_ERR(port->csr_base);

	/* The config window is already mapped by the ECAM core */
	port->cfg_base = cfg->win;
	port->version = ipversion;

	cfg->priv = port;
	return 0;
}

/* ECAM init hook for v1 controllers */
static int xgene_v1_pcie_ecam_init(struct pci_config_window *cfg)
{
	return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_1);
}

R
Rob Herring 已提交
259
/* ECAM ops used by the ACPI MCFG quirk tables for v1 controllers */
const struct pci_ecam_ops xgene_v1_pcie_ecam_ops = {
	.init		= xgene_v1_pcie_ecam_init,
	.pci_ops	= {
		.map_bus	= xgene_pcie_map_bus,
		.read		= xgene_pcie_config_read32,
		.write		= pci_generic_config_write,
	}
};

/* ECAM init hook for v2 controllers */
static int xgene_v2_pcie_ecam_init(struct pci_config_window *cfg)
{
	return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_2);
}

R
Rob Herring 已提交
273
/* ECAM ops used by the ACPI MCFG quirk tables for v2 controllers */
const struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
	.init		= xgene_v2_pcie_ecam_init,
	.pci_ops	= {
		.map_bus	= xgene_pcie_map_bus,
		.read		= xgene_pcie_config_read32,
		.write		= pci_generic_config_write,
	}
};
281
#endif
282

283
#if defined(CONFIG_PCI_XGENE)
284
/*
 * Program an inbound mask whose 64 bits are stored as four 16-bit
 * halves spread across the registers at @addr, @addr+4 and @addr+8
 * (each half lands in the upper or lower half-word of its register,
 * preserving the other half via read-modify-write).  Returns the
 * computed mask.
 */
static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr,
				  u32 flags, u64 size)
{
	u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags;
	u32 val32 = 0;
	u32 val;

	/* mask[15:0] -> addr[31:16] */
	val32 = xgene_pcie_readl(port, addr);
	val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16);
	xgene_pcie_writel(port, addr, val);

	/* mask[31:16] -> (addr + 0x04)[15:0] */
	val32 = xgene_pcie_readl(port, addr + 0x04);
	val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16);
	xgene_pcie_writel(port, addr + 0x04, val);

	/* mask[47:32] -> (addr + 0x04)[31:16] */
	val32 = xgene_pcie_readl(port, addr + 0x04);
	val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16);
	xgene_pcie_writel(port, addr + 0x04, val);

	/* mask[63:48] -> (addr + 0x08)[15:0] */
	val32 = xgene_pcie_readl(port, addr + 0x08);
	val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16);
	xgene_pcie_writel(port, addr + 0x08, val);

	return mask;
}

/*
 * Query link state, updating port->link_up.  When the link is up, also
 * report the PHY rate (*speed) and negotiated lane count (*lanes); both
 * outputs are left untouched when the link is down.
 */
static void xgene_pcie_linkup(struct xgene_pcie_port *port,
			      u32 *lanes, u32 *speed)
{
	u32 val32;

	port->link_up = false;
	val32 = xgene_pcie_readl(port, PCIECORE_CTLANDSTATUS);
	if (val32 & LINK_UP_MASK) {
		port->link_up = true;
		*speed = PIPE_PHY_RATE_RD(val32);
		/* lane count lives in BRIDGE_STATUS_0[31:26] */
		val32 = xgene_pcie_readl(port, BRIDGE_STATUS_0);
		*lanes = val32 >> 26;
	}
}

static int xgene_pcie_init_port(struct xgene_pcie_port *port)
{
327
	struct device *dev = port->dev;
328 329
	int rc;

330
	port->clk = clk_get(dev, NULL);
331
	if (IS_ERR(port->clk)) {
332
		dev_err(dev, "clock not available\n");
333 334 335 336 337
		return -ENODEV;
	}

	rc = clk_prepare_enable(port->clk);
	if (rc) {
338
		dev_err(dev, "clock enable failed\n");
339 340 341 342 343 344 345 346 347
		return rc;
	}

	return 0;
}

static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
			      struct platform_device *pdev)
{
348
	struct device *dev = port->dev;
349 350 351
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
352
	port->csr_base = devm_pci_remap_cfg_resource(dev, res);
353 354 355
	if (IS_ERR(port->csr_base))
		return PTR_ERR(port->csr_base);

356
	port->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
357 358 359 360 361 362 363 364 365 366 367
	if (IS_ERR(port->cfg_base))
		return PTR_ERR(port->cfg_base);
	port->cfg_addr = res->start;

	return 0;
}

/*
 * Program one outbound translation window (six CSRs starting at
 * @offset): CPU base address, size-derived mask, and PCI target
 * address.
 */
static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
				    struct resource *res, u32 offset,
				    u64 cpu_addr, u64 pci_addr)
{
	struct device *dev = port->dev;
	resource_size_t size = resource_size(res);
	u64 restype = resource_type(res);
	u64 mask = 0;
	u32 min_size;
	u32 flag = EN_REG;

	if (restype == IORESOURCE_MEM) {
		min_size = SZ_128M;
	} else {
		min_size = 128;
		flag |= OB_LO_IO;
	}

	/* An undersized window leaves mask = 0, i.e. the region disabled */
	if (size >= min_size)
		mask = ~(size - 1) | flag;
	else
		dev_warn(dev, "res size 0x%llx less than minimum 0x%x\n",
			 (u64)size, min_size);

	xgene_pcie_writel(port, offset, lower_32_bits(cpu_addr));
	xgene_pcie_writel(port, offset + 0x04, upper_32_bits(cpu_addr));
	xgene_pcie_writel(port, offset + 0x08, lower_32_bits(mask));
	xgene_pcie_writel(port, offset + 0x0c, upper_32_bits(mask));
	xgene_pcie_writel(port, offset + 0x10, lower_32_bits(pci_addr));
	xgene_pcie_writel(port, offset + 0x14, upper_32_bits(pci_addr));
}

396
/* Point the CFG BAR at the config-space window and enable it */
static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port)
{
	u64 addr = port->cfg_addr;

	xgene_pcie_writel(port, CFGBARL, lower_32_bits(addr));
	xgene_pcie_writel(port, CFGBARH, upper_32_bits(addr));
	xgene_pcie_writel(port, CFGCTL, EN_REG);
}

405
/*
 * Program an outbound window for each host bridge aperture: OMR3 for
 * I/O space, OMR2 for prefetchable memory, OMR1 for non-prefetchable
 * memory.  Finishes by enabling the config-space window.
 */
static int xgene_pcie_map_ranges(struct xgene_pcie_port *port)
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port);
	struct resource_entry *window;
	struct device *dev = port->dev;

	resource_list_for_each_entry(window, &bridge->windows) {
		struct resource *res = window->res;
		u64 restype = resource_type(res);

		dev_dbg(dev, "%pR\n", res);

		switch (restype) {
		case IORESOURCE_IO:
			/* I/O resources hold PIO tokens; translate to MMIO */
			xgene_pcie_setup_ob_reg(port, res, OMR3BARL,
						pci_pio_to_address(res->start),
						res->start - window->offset);
			break;
		case IORESOURCE_MEM:
			if (res->flags & IORESOURCE_PREFETCH)
				xgene_pcie_setup_ob_reg(port, res, OMR2BARL,
							res->start,
							res->start -
							window->offset);
			else
				xgene_pcie_setup_ob_reg(port, res, OMR1BARL,
							res->start,
							res->start -
							window->offset);
			break;
		case IORESOURCE_BUS:
			/* bus number ranges need no hardware window */
			break;
		default:
			dev_err(dev, "invalid resource %pR\n", res);
			return -EINVAL;
		}
	}
	xgene_pcie_setup_cfg_reg(port);
	return 0;
}

446 447
/*
 * Program a PCIe Inbound Map (PIM) register set: target bus address
 * (with coherency enabled) and region size.
 */
static void xgene_pcie_setup_pims(struct xgene_pcie_port *port, u32 pim_reg,
				  u64 pim, u64 size)
{
	xgene_pcie_writel(port, pim_reg, lower_32_bits(pim));
	xgene_pcie_writel(port, pim_reg + 0x04,
			  upper_32_bits(pim) | EN_COHERENCY);
	xgene_pcie_writel(port, pim_reg + 0x10, lower_32_bits(size));
	xgene_pcie_writel(port, pim_reg + 0x14, upper_32_bits(size));
}

/*
 * X-Gene PCIe support maximum 3 inbound memory regions
 * This function helps to select a region based on size of region
 */
static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
{
	if ((size > 4) && (size < SZ_16M) && !(*ib_reg_mask & (1 << 1))) {
		*ib_reg_mask |= (1 << 1);
		return 1;
	}

	if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) {
		*ib_reg_mask |= (1 << 0);
		return 0;
	}

	if ((size > SZ_1M) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 2))) {
		*ib_reg_mask |= (1 << 2);
		return 2;
	}

	return -EINVAL;
}

/*
 * Program one inbound translation for a dma-ranges entry.  The region
 * is chosen by size; region 0 is programmed through the RC's BAR0/1 in
 * config space, regions 1 and 2 through dedicated IBAR/IRMSK CSRs.
 * Entries that fit no free region are skipped with a warning.
 */
static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
				    struct resource_entry *entry,
				    u8 *ib_reg_mask)
{
	void __iomem *cfg_base = port->cfg_base;
	struct device *dev = port->dev;
	void *bar_addr;
	u32 pim_reg;
	u64 cpu_addr = entry->res->start;
	u64 pci_addr = cpu_addr - entry->offset;
	u64 size = resource_size(entry->res);
	u64 mask = ~(size - 1) | EN_REG;
	u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64;
	u32 bar_low;
	int region;

	region = xgene_pcie_select_ib_reg(ib_reg_mask, size);
	if (region < 0) {
		dev_warn(dev, "invalid pcie dma-range config\n");
		return;
	}

	if (entry->res->flags & IORESOURCE_PREFETCH)
		flags |= PCI_BASE_ADDRESS_MEM_PREFETCH;

	bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
	switch (region) {
	case 0:
		/* region 0 uses the bridge's own BAR0/1 (hidden from core) */
		xgene_pcie_set_ib_mask(port, BRIDGE_CFG_4, flags, size);
		bar_addr = cfg_base + PCI_BASE_ADDRESS_0;
		writel(bar_low, bar_addr);
		writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
		pim_reg = PIM1_1L;
		break;
	case 1:
		xgene_pcie_writel(port, IBAR2, bar_low);
		xgene_pcie_writel(port, IR2MSK, lower_32_bits(mask));
		pim_reg = PIM2_1L;
		break;
	case 2:
		xgene_pcie_writel(port, IBAR3L, bar_low);
		xgene_pcie_writel(port, IBAR3L + 0x4, upper_32_bits(cpu_addr));
		xgene_pcie_writel(port, IR3MSKL, lower_32_bits(mask));
		xgene_pcie_writel(port, IR3MSKL + 0x4, upper_32_bits(mask));
		pim_reg = PIM3_1L;
		break;
	}

	xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1));
}

/* Program an inbound window for every dma-ranges entry of the bridge */
static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port);
	struct resource_entry *entry;
	u8 ib_reg_mask = 0;	/* tracks which of the 3 regions are taken */

	resource_list_for_each_entry(entry, &bridge->dma_ranges)
		xgene_pcie_setup_ib_reg(port, entry, &ib_reg_mask);

	return 0;
}

/* clear BAR configuration which was done by firmware */
static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
{
	int i;

	/* zero every CSR from PIM1_1L up to and including CFGCTL */
	for (i = PIM1_1L; i <= CFGCTL; i += 4)
		xgene_pcie_writel(port, i, 0);
}

552
/*
 * One-time controller setup: clear firmware-programmed state, set the
 * bridge vendor/device IDs, program outbound and inbound windows, and
 * report the resulting link state.
 */
static int xgene_pcie_setup(struct xgene_pcie_port *port)
{
	struct device *dev = port->dev;
	u32 val, lanes = 0, speed = 0;
	int ret;

	xgene_pcie_clear_config(port);

	/* setup the vendor and device IDs correctly */
	val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID;
	xgene_pcie_writel(port, BRIDGE_CFG_0, val);

	ret = xgene_pcie_map_ranges(port);
	if (ret)
		return ret;

	ret = xgene_pcie_parse_map_dma_ranges(port);
	if (ret)
		return ret;

	/* link down is not fatal; enumeration will simply find nothing */
	xgene_pcie_linkup(port, &lanes, &speed);
	if (!port->link_up)
		dev_info(dev, "(rc) link down\n");
	else
		dev_info(dev, "(rc) x%d gen-%d link up\n", lanes, speed + 1);
	return 0;
}

580 581 582 583 584 585
/* Config accessors for the DT probe path */
static struct pci_ops xgene_pcie_ops = {
	.map_bus = xgene_pcie_map_bus,
	.read = xgene_pcie_config_read32,
	.write = pci_generic_config_write32,
};

586
/*
 * DT probe: allocate the host bridge, map registers, enable the clock,
 * configure the controller and scan the root bus.
 */
static int xgene_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *dn = dev->of_node;
	struct xgene_pcie_port *port;
	struct pci_host_bridge *bridge;
	int ret;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
	if (!bridge)
		return -ENOMEM;

	port = pci_host_bridge_priv(bridge);

	port->node = of_node_get(dn);
	port->dev = dev;

	/* only the v1 IP has a distinct compatible; default to unknown */
	port->version = XGENE_PCIE_IP_VER_UNKN;
	if (of_device_is_compatible(port->node, "apm,xgene-pcie"))
		port->version = XGENE_PCIE_IP_VER_1;

	ret = xgene_pcie_map_reg(port, pdev);
	if (ret)
		return ret;

	ret = xgene_pcie_init_port(port);
	if (ret)
		return ret;

	ret = xgene_pcie_setup(port);
	if (ret)
		return ret;

	bridge->sysdata = port;
	bridge->ops = &xgene_pcie_ops;

	return pci_host_probe(bridge);
}

static const struct of_device_id xgene_pcie_match_table[] = {
	{.compatible = "apm,xgene-pcie",},
	{},
};

/* Built-in only; bind attrs suppressed as the RC cannot be torn down */
static struct platform_driver xgene_pcie_driver = {
	.driver = {
		.name = "xgene-pcie",
		.of_match_table = of_match_ptr(xgene_pcie_match_table),
		.suppress_bind_attrs = true,
	},
	.probe = xgene_pcie_probe,
};
builtin_platform_driver(xgene_pcie_driver);
639
#endif