/*
 * DesignWare application register space functions for Keystone PCI controller
 *
 * Copyright (C) 2013-2014 Texas Instruments, Ltd.
 *		http://www.ti.com
 *
 * Author: Murali Karicheri <m-karicheri2@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"
#include "pci-keystone.h"

/* Application register defines */
#define LTSSM_EN_VAL			1
#define LTSSM_STATE_MASK		0x1f
#define LTSSM_STATE_L0			0x11
#define DBI_CS2_EN_VAL			0x20
#define OB_XLAT_EN_VAL			2

/* Application registers */
#define CMD_STATUS			0x004
#define CFG_SETUP			0x008
#define OB_SIZE				0x030
#define CFG_PCIM_WIN_SZ_IDX		3
#define CFG_PCIM_WIN_CNT		32
#define SPACE0_REMOTE_CFG_OFFSET	0x1000
#define OB_OFFSET_INDEX(n)		(0x200 + (8 * (n)))
#define OB_OFFSET_HI(n)			(0x204 + (8 * (n)))

/* IRQ register defines */
#define IRQ_EOI				0x050
#define IRQ_STATUS			0x184
#define IRQ_ENABLE_SET			0x188
#define IRQ_ENABLE_CLR			0x18c

#define MSI_IRQ				0x054
#define MSI0_IRQ_STATUS			0x104
#define MSI0_IRQ_ENABLE_SET		0x108
#define MSI0_IRQ_ENABLE_CLR		0x10c
#define MSI_IRQ_OFFSET			4
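/*
 * The MSI0..MSI7 host interrupt status/enable register sets are laid out at
 * a 0x10 byte stride; the helpers below index them as
 * MSI0_IRQ_xxx + (reg_offset << 4).
 */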

/* Error IRQ bits */
#define ERR_AER		BIT(5)	/* ECRC error */
#define ERR_AXI		BIT(4)	/* AXI tag lookup fatal error */
#define ERR_CORR	BIT(3)	/* Correctable error */
#define ERR_NONFATAL	BIT(2)	/* Non-fatal error */
#define ERR_FATAL	BIT(1)	/* Fatal error */
#define ERR_SYS		BIT(0)	/* System (fatal, non-fatal, or correctable) */
#define ERR_IRQ_ALL	(ERR_AER | ERR_AXI | ERR_CORR | \
			 ERR_NONFATAL | ERR_FATAL | ERR_SYS)
#define ERR_FATAL_IRQ	(ERR_FATAL | ERR_AXI)
#define ERR_IRQ_STATUS_RAW		0x1c0
#define ERR_IRQ_STATUS			0x1c4
#define ERR_IRQ_ENABLE_SET		0x1c8
#define ERR_IRQ_ENABLE_CLR		0x1cc

/* Config space registers */
#define DEBUG0				0x728

#define to_keystone_pcie(x)	dev_get_drvdata((x)->dev)

static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
					     u32 *bit_pos)
{
	*reg_offset = offset % 8;
	*bit_pos = offset >> 3;
}
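
/*
 * Example of the mapping above: MSI vector 13 gives reg_offset = 13 % 8 = 5
 * and bit_pos = 13 >> 3 = 1, i.e. bit 1 of the MSI5 status/enable registers
 * (see the vector layout comment in ks_dw_pcie_handle_msi_irq()).
 */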

phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	return ks_pcie->app.start + MSI_IRQ;
}
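
/*
 * Note: the address returned above is the physical address of the MSI_IRQ
 * register in the application register space; ks_dw_pcie_v3_65_scan_bus()
 * later programs BAR0 with ks_pcie->app.start so that inbound MSI writes
 * from endpoints land on this register.
 */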

static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
{
	return readl(ks_pcie->va_app_base + offset);
}

static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val)
{
	writel(val, ks_pcie->va_app_base + offset);
}

void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	u32 pending, vector;
	int src, virq;

	pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));

	/*
	 * MSI0 status bits 0-3 report vectors 0, 8, 16, 24; MSI1 status
	 * bits report vectors 1, 9, 17, 25; and so forth.
	 */
	for (src = 0; src < 4; src++) {
		if (BIT(src) & pending) {
			vector = offset + (src << 3);
			virq = irq_linear_revmap(pp->irq_domain, vector);
			dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
				src, vector, virq);
			generic_handle_irq(virq);
		}
	}
}

static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
{
	u32 offset, reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie;
	struct msi_desc *msi;
	struct pcie_port *pp;
	struct dw_pcie *pci;

	msi = irq_data_get_msi_desc(d);
	pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);
	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
	update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);

	ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
			 BIT(bit_pos));
	ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}
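
/*
 * The IRQ_EOI register takes an event number: the four legacy INTx events
 * use 0-3 (see ks_dw_pcie_handle_legacy_irq()), and the eight MSI host
 * interrupts appear to follow at MSI_IRQ_OFFSET (4), hence the
 * reg_offset + MSI_IRQ_OFFSET written above.
 */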

void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	u32 reg_offset, bit_pos;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
	ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
			 BIT(bit_pos));
}

void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	u32 reg_offset, bit_pos;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
	ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
			 BIT(bit_pos));
}

static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
{
	struct msi_desc *msi;
	struct pcie_port *pp;
	u32 offset;

	msi = irq_data_get_msi_desc(d);
	pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);

	/* Mask the endpoint if PVM is implemented */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (msi->msi_attrib.maskbit)
			pci_msi_mask_irq(d);
	}

	ks_dw_pcie_msi_clear_irq(pp, offset);
}

static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
{
	struct msi_desc *msi;
	struct pcie_port *pp;
	u32 offset;

	msi = irq_data_get_msi_desc(d);
	pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);

	/* Unmask the endpoint if PVM is implemented */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (msi->msi_attrib.maskbit)
			pci_msi_unmask_irq(d);
	}

	ks_dw_pcie_msi_set_irq(pp, offset);
}

static struct irq_chip ks_dw_pcie_msi_irq_chip = {
	.name = "Keystone-PCIe-MSI-IRQ",
	.irq_ack = ks_dw_pcie_msi_irq_ack,
	.irq_mask = ks_dw_pcie_msi_irq_mask,
	.irq_unmask = ks_dw_pcie_msi_irq_unmask,
};

static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
	.map = ks_dw_pcie_msi_map,
};

int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	struct device *dev = pci->dev;
	int i;

	pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
					MAX_MSI_IRQS,
					&ks_dw_pcie_msi_domain_ops,
					chip);
	if (!pp->irq_domain) {
		dev_err(dev, "irq domain init failed\n");
		return -ENXIO;
	}

	for (i = 0; i < MAX_MSI_IRQS; i++)
		irq_create_mapping(pp->irq_domain, i);

	return 0;
}

void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
{
	int i;

	for (i = 0; i < PCI_NUM_INTX; i++)
		ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
}
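
/*
 * Like the MSI registers, the per-INTx IRQ register sets (IRQ_STATUS /
 * IRQ_ENABLE_*) are spaced 0x10 bytes apart, hence the (i << 4) and
 * (offset << 4) indexing used above and in the legacy IRQ handler below.
 */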

void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	u32 pending;
	int virq;

	pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));

	if (BIT(0) & pending) {
		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
		dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
		generic_handle_irq(virq);
	}

	/* EOI the INTx interrupt */
	ks_dw_app_writel(ks_pcie, IRQ_EOI, offset);
}

void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
	ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
}

irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
{
	u32 status;

	status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
	if (!status)
		return IRQ_NONE;

	if (status & ERR_FATAL_IRQ)
		dev_err(ks_pcie->pci->dev, "fatal error (status %#010x)\n",
			status);

	/* Ack the IRQ; status bits are RW1C */
	ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status);
	return IRQ_HANDLED;
}

static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
{
}

static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
{
}

static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
{
}

static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
	.name = "Keystone-PCI-Legacy-IRQ",
	.irq_ack = ks_dw_pcie_ack_legacy_irq,
	.irq_mask = ks_dw_pcie_mask_legacy_irq,
	.irq_unmask = ks_dw_pcie_unmask_legacy_irq,
};

static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hw_irq)
{
	irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, d->host_data);

	return 0;
}

static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
	.map = ks_dw_pcie_init_legacy_irq_map,
	.xlate = irq_domain_xlate_onetwocell,
};

/**
 * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
 * registers
 * @ks_pcie: ptr to keystone_pcie structure
 *
 * Since modification of dbi_cs2 involves a different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val);

	do {
		val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	} while (!(val & DBI_CS2_EN_VAL));
}

/**
 * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
 * @ks_pcie: ptr to keystone_pcie structure
 *
 * Since modification of dbi_cs2 involves a different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val);

	do {
		val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	} while (val & DBI_CS2_EN_VAL);
}

void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	u32 start = pp->mem->start, end = pp->mem->end;
	int i, tr_size;
	u32 val;

	/* Disable BARs for inbound access */
	ks_dw_pcie_set_dbi_mode(ks_pcie);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
	ks_dw_pcie_clear_dbi_mode(ks_pcie);

	/* Set outbound translation size per window division */
	ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7);

	tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
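
	/*
	 * With CFG_PCIM_WIN_SZ_IDX = 3 each window spans 8 MB, so the 32
	 * windows cover up to 256 MB of outbound PCI memory space.
	 */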

	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
	for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
		ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1);
		ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0);
		start += tr_size;
	}

	/* Enable OB translation */
	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val);
}

/**
 * ks_pcie_cfg_setup() - Set up configuration space address for a device
 *
 * @ks_pcie: ptr to keystone_pcie structure
 * @bus: Bus number the device is residing on
 * @devfn: device, function number info
 *
 * Forms and returns the address of configuration space mapped in PCIESS
 * address space 0.  Also configures CFG_SETUP for remote configuration space
 * access.
 *
 * The address space has two regions for configuration access: local and
 * remote.  The local region is used for bus 0 (the RC itself).  The remote
 * region is used for all other buses, with TYPE 1 access when bus > 1.
 * Devices on bus 1 get TYPE 0 access, since bus 1 is our (logical)
 * secondary bus.
 * CFG_SETUP is needed only for remote configuration access.
 */
static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
				       unsigned int devfn)
{
	u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	u32 regval;

	if (bus == 0)
		return pci->dbi_base;

	regval = (bus << 16) | (device << 8) | function;

	/*
	 * Bus 1 is a virtual (secondary) bus and gets TYPE 0 access only;
	 * any bus beyond it needs TYPE 1 access (BIT(24) in CFG_SETUP).
	 */
	if (bus != 1)
		regval |= BIT(24);

	ks_dw_app_writel(ks_pcie, CFG_SETUP, regval);
	return pp->va_cfg0_base;
}

int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
			     unsigned int devfn, int where, int size, u32 *val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u8 bus_num = bus->number;
	void __iomem *addr;

	addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);

	return dw_pcie_read(addr + where, size, val);
}

int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
			     unsigned int devfn, int where, int size, u32 val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u8 bus_num = bus->number;
	void __iomem *addr;

	addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);

	return dw_pcie_write(addr + where, size, val);
}

/**
 * ks_dw_pcie_v3_65_scan_bus() - Keystone scan_bus post-initialization
 * @pp: ptr to pcie_port structure
 *
 * This sets up BAR0 to enable inbound access to the MSI_IRQ register.
 */
void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	/* Configure and set up BAR0 */
	ks_dw_pcie_set_dbi_mode(ks_pcie);

	/* Enable BAR0 */
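	/*
	 * With dbi_cs2 set, the writes below go to the overlaid BAR0 mask
	 * register rather than the BAR itself: bit 0 enables the BAR and the
	 * mask value selects a 4 KB aperture (the usual DesignWare BAR mask
	 * convention).
	 */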
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);

	ks_dw_pcie_clear_dbi_mode(ks_pcie);

	/*
	 * For BAR0, just setting bus address for inbound writes (MSI) should
	 * be sufficient.  Use physical address to avoid any conflicts.
	 */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
}

/**
 * ks_dw_pcie_link_up() - Check whether the PCIe link is up
 * @pci: ptr to dw_pcie structure
 *
 * DEBUG0 reflects the LTSSM state in its low five bits; LTSSM_STATE_L0
 * (0x11) indicates the link has trained up to L0.
 */
int ks_dw_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, DEBUG0);
	return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
}

void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
{
	u32 val;

	/* Disable Link training */
	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	val &= ~LTSSM_EN_VAL;
	ks_dw_app_writel(ks_pcie, CMD_STATUS, val);

	/* Initiate Link Training */
	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
}

/**
 * ks_dw_pcie_host_init() - initialize host for v3_65 DW hardware
 * @ks_pcie: ptr to keystone_pcie structure
 * @msi_intc_np: device node of the MSI interrupt controller
 *
 * Ioremap the register resources, initialize the legacy IRQ domain and call
 * dw_pcie_host_init() to initialize the Keystone PCI host controller.
 */
int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
				struct device_node *msi_intc_np)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	/* Index 0 is the config reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/*
	 * Set both to the same address; they are used by the PCIe
	 * rd/wr_other_conf functions.
	 */
	pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
	pp->va_cfg1_base = pp->va_cfg0_base;

	/* Index 1 is the application reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ks_pcie->va_app_base))
		return PTR_ERR(ks_pcie->va_app_base);

	ks_pcie->app = *res;

	/* Create legacy IRQ domain */
	ks_pcie->legacy_irq_domain =
			irq_domain_add_linear(ks_pcie->legacy_intc_np,
					PCI_NUM_INTX,
					&ks_dw_pcie_legacy_irq_domain_ops,
					NULL);
	if (!ks_pcie->legacy_irq_domain) {
		dev_err(dev, "Failed to add irq domain for legacy irqs\n");
		return -EINVAL;
	}

	return dw_pcie_host_init(pp);
}