/*
 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Kishon Vijay Abraham I <kishon@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/resource.h>
#include <linux/types.h>

#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#include "pcie-designware.h"

/* PCIe controller wrapper DRA7XX configuration registers */

#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN		0x0024
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN		0x0028
#define	ERR_SYS						BIT(0)
#define	ERR_FATAL					BIT(1)
#define	ERR_NONFATAL					BIT(2)
#define	ERR_COR						BIT(3)
#define	ERR_AXI						BIT(4)
#define	ERR_ECRC					BIT(5)
#define	PME_TURN_OFF					BIT(8)
#define	PME_TO_ACK					BIT(9)
#define	PM_PME						BIT(10)
#define	LINK_REQ_RST					BIT(11)
#define	LINK_UP_EVT					BIT(12)
#define	CFG_BME_EVT					BIT(13)
#define	CFG_MSE_EVT					BIT(14)
#define	INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
			ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
			LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)

#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI		0x0034
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI		0x0038
#define	INTA						BIT(0)
#define	INTB						BIT(1)
#define	INTC						BIT(2)
#define	INTD						BIT(3)
#define	MSI						BIT(4)
#define	LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)

#define	PCIECTRL_TI_CONF_DEVICE_TYPE			0x0100
#define	DEVICE_TYPE_EP					0x0
#define	DEVICE_TYPE_LEG_EP				0x1
#define	DEVICE_TYPE_RC					0x4

#define	PCIECTRL_DRA7XX_CONF_DEVICE_CMD			0x0104
#define	LTSSM_EN					0x1

#define	PCIECTRL_DRA7XX_CONF_PHY_CS			0x010C
#define	LINK_UP						BIT(16)
#define	DRA7XX_CPU_TO_BUS_ADDR				0x0FFFFFFF

#define EXP_CAP_ID_OFFSET				0x70

#define	PCIECTRL_TI_CONF_INTX_ASSERT			0x0124
#define	PCIECTRL_TI_CONF_INTX_DEASSERT			0x0128

#define	PCIECTRL_TI_CONF_MSI_XMT			0x012c
#define MSI_REQ_GRANT					BIT(0)
#define MSI_VECTOR_SHIFT				7

struct dra7xx_pcie {
86
	struct dw_pcie		*pci;
87 88 89
	void __iomem		*base;		/* DT ti_conf */
	int			phy_count;	/* DT phy-names count */
	struct phy		**phy;
90
	int			link_gen;
91
	struct irq_domain	*irq_domain;
92 93 94 95 96
	enum dw_pcie_device_mode mode;
};

struct dra7xx_pcie_of_data {
	enum dw_pcie_device_mode mode;
97 98
};

99
#define to_dra7xx_pcie(x)	dev_get_drvdata((x)->dev)
100 101 102 103 104 105 106 107 108 109 110 111

static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
{
	return readl(pcie->base + offset);
}

static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
				      u32 value)
{
	writel(value, pcie->base + offset);
}

112 113 114 115 116
static u64 dra7xx_pcie_cpu_addr_fixup(u64 pci_addr)
{
	return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
}

117
static int dra7xx_pcie_link_up(struct dw_pcie *pci)
118
{
119
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
120 121 122 123 124
	u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);

	return !!(reg & LINK_UP);
}

125
static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
126
{
127 128 129 130 131 132 133 134 135 136 137
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
}

static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
138
	struct device *dev = pci->dev;
139
	u32 reg;
140
	u32 exp_cap_off = EXP_CAP_ID_OFFSET;
141

142
	if (dw_pcie_link_up(pci)) {
143
		dev_err(dev, "link is already up\n");
144 145 146
		return 0;
	}

147
	if (dra7xx->link_gen == 1) {
148
		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
149
			     4, &reg);
150 151 152
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
153
			dw_pcie_write(pci->dbi_base + exp_cap_off +
154
				      PCI_EXP_LNKCAP, 4, reg);
155 156
		}

157
		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
158
			     2, &reg);
159 160 161
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
162
			dw_pcie_write(pci->dbi_base + exp_cap_off +
163
				      PCI_EXP_LNKCTL2, 2, reg);
164 165 166
		}
	}

167 168 169 170
	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg |= LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

171
	return 0;
172 173
}

174
static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
175 176 177
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
			   ~LEG_EP_INTERRUPTS & ~MSI);
178 179 180

	dra7xx_pcie_writel(dra7xx,
			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
181
			   MSI | LEG_EP_INTERRUPTS);
182 183
}

184 185 186 187 188 189 190 191 192 193 194 195 196 197
static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
			   ~INTERRUPTS);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
			   INTERRUPTS);
}

static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
	dra7xx_pcie_enable_msi_interrupts(dra7xx);
}

198 199
static void dra7xx_pcie_host_init(struct pcie_port *pp)
{
200 201
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
202

203 204
	dw_pcie_setup_rc(pp);

205 206
	dra7xx_pcie_establish_link(pci);
	dw_pcie_wait_for_link(pci);
207
	dw_pcie_msi_init(pp);
208
	dra7xx_pcie_enable_interrupts(dra7xx);
209 210
}

211
static struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229
	.host_init = dra7xx_pcie_host_init,
};

static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = dra7xx_pcie_intx_map,
};

static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
{
230 231 232
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
233 234 235 236 237
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node =  of_get_next_child(node, NULL);

	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
238
		return -ENODEV;
239 240
	}

241 242 243
	dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
						   &intx_domain_ops, pp);
	if (!dra7xx->irq_domain) {
244
		dev_err(dev, "Failed to get a INTx IRQ domain\n");
245
		return -ENODEV;
246 247 248 249 250 251 252
	}

	return 0;
}

static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
{
253
	struct dra7xx_pcie *dra7xx = arg;
254 255
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
256 257 258 259 260 261 262 263 264 265 266 267
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);

	switch (reg) {
	case MSI:
		dw_handle_msi_irq(pp);
		break;
	case INTA:
	case INTB:
	case INTC:
	case INTD:
268 269
		generic_handle_irq(irq_find_mapping(dra7xx->irq_domain,
						    ffs(reg)));
270 271 272 273 274 275 276 277 278 279 280 281
		break;
	}

	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);

	return IRQ_HANDLED;
}


static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
282 283
	struct dw_pcie *pci = dra7xx->pci;
	struct device *dev = pci->dev;
284
	struct dw_pcie_ep *ep = &pci->ep;
285 286 287 288 289
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);

	if (reg & ERR_SYS)
290
		dev_dbg(dev, "System Error\n");
291 292

	if (reg & ERR_FATAL)
293
		dev_dbg(dev, "Fatal Error\n");
294 295

	if (reg & ERR_NONFATAL)
296
		dev_dbg(dev, "Non Fatal Error\n");
297 298

	if (reg & ERR_COR)
299
		dev_dbg(dev, "Correctable Error\n");
300 301

	if (reg & ERR_AXI)
302
		dev_dbg(dev, "AXI tag lookup fatal Error\n");
303 304

	if (reg & ERR_ECRC)
305
		dev_dbg(dev, "ECRC Error\n");
306 307

	if (reg & PME_TURN_OFF)
308
		dev_dbg(dev,
309 310 311
			"Power Management Event Turn-Off message received\n");

	if (reg & PME_TO_ACK)
312
		dev_dbg(dev,
313 314 315
			"Power Management Turn-Off Ack message received\n");

	if (reg & PM_PME)
316
		dev_dbg(dev, "PM Power Management Event message received\n");
317 318

	if (reg & LINK_REQ_RST)
319
		dev_dbg(dev, "Link Request Reset\n");
320

321 322 323
	if (reg & LINK_UP_EVT) {
		if (dra7xx->mode == DW_PCIE_EP_TYPE)
			dw_pcie_ep_linkup(ep);
324
		dev_dbg(dev, "Link-up state change\n");
325
	}
326 327

	if (reg & CFG_BME_EVT)
328
		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");
329 330

	if (reg & CFG_MSE_EVT)
331
		dev_dbg(dev, "CFG 'Memory Space Enable' change\n");
332 333 334 335 336 337

	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);

	return IRQ_HANDLED;
}

338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425
static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}

static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
	mdelay(1);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
}

static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
				      u8 interrupt_num)
{
	u32 reg;

	reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
	reg |= MSI_REQ_GRANT;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
}

static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep,
				 enum pci_epc_irq_type type, u8 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		dra7xx_pcie_raise_legacy_irq(dra7xx);
		break;
	case PCI_EPC_IRQ_MSI:
		dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
		break;
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
	}

	return 0;
}

static struct dw_pcie_ep_ops pcie_ep_ops = {
	.ep_init = dra7xx_pcie_ep_init,
	.raise_irq = dra7xx_pcie_raise_irq,
};

static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
				     struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = dra7xx->pci;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
	pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
	if (!pci->dbi_base)
		return -ENOMEM;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
	pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
	if (!pci->dbi_base2)
		return -ENOMEM;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}

	return 0;
}

426 427
static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
				       struct platform_device *pdev)
428 429
{
	int ret;
430 431 432
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
433 434 435 436 437 438 439 440
	struct resource *res;

	pp->irq = platform_get_irq(pdev, 1);
	if (pp->irq < 0) {
		dev_err(dev, "missing IRQ resource\n");
		return -EINVAL;
	}

441
	ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
442
			       IRQF_SHARED | IRQF_NO_THREAD,
443
			       "dra7-pcie-msi",	dra7xx);
444
	if (ret) {
445
		dev_err(dev, "failed to request irq\n");
446 447 448
		return ret;
	}

449 450 451
	ret = dra7xx_pcie_init_irq_domain(pp);
	if (ret < 0)
		return ret;
452 453

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
454 455
	pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
	if (!pci->dbi_base)
456 457 458 459
		return -ENOMEM;

	ret = dw_pcie_host_init(pp);
	if (ret) {
460
		dev_err(dev, "failed to initialize host\n");
461 462 463 464 465 466
		return ret;
	}

	return 0;
}

467
static const struct dw_pcie_ops dw_pcie_ops = {
468
	.cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
469 470
	.start_link = dra7xx_pcie_establish_link,
	.stop_link = dra7xx_pcie_stop_link,
471 472 473
	.link_up = dra7xx_pcie_link_up,
};

474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512
static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;

	while (phy_count--) {
		phy_power_off(dra7xx->phy[phy_count]);
		phy_exit(dra7xx->phy[phy_count]);
	}
}

static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;
	int ret;
	int i;

	for (i = 0; i < phy_count; i++) {
		ret = phy_init(dra7xx->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(dra7xx->phy[i]);
		if (ret < 0) {
			phy_exit(dra7xx->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(dra7xx->phy[i]);
		phy_exit(dra7xx->phy[i]);
	}

	return ret;
}

513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532
static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id of_dra7xx_pcie_match[] = {
	{
		.compatible = "ti,dra7-pcie",
		.data = &dra7xx_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra7-pcie-ep",
		.data = &dra7xx_pcie_ep_of_data,
	},
	{},
};

533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574
/*
 * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
 * @dra7xx: the dra7xx device where the workaround should be applied
 *
 * Access to the PCIe slave port that are not 32-bit aligned will result
 * in incorrect mapping to TLP Address and Byte enable fields. Therefore,
 * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or
 * 0x3.
 *
 * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
 */
static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev)
{
	int ret;
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	struct regmap *regmap;

	regmap = syscon_regmap_lookup_by_phandle(np,
						 "ti,syscon-unaligned-access");
	if (IS_ERR(regmap)) {
		dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
		return -EINVAL;
	}

	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
					       2, 0, &args);
	if (ret) {
		dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
		return ret;
	}

	ret = regmap_update_bits(regmap, args.args[0], args.args[1],
				 args.args[1]);
	if (ret)
		dev_err(dev, "failed to enable unaligned access\n");

	of_node_put(args.np);

	return ret;
}

575 576 577 578 579 580 581 582 583 584
static int __init dra7xx_pcie_probe(struct platform_device *pdev)
{
	u32 reg;
	int ret;
	int irq;
	int i;
	int phy_count;
	struct phy **phy;
	void __iomem *base;
	struct resource *res;
585
	struct dw_pcie *pci;
586
	struct pcie_port *pp;
587
	struct dra7xx_pcie *dra7xx;
588 589 590
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	char name[10];
591
	struct gpio_desc *reset;
592 593 594 595 596 597 598 599 600 601
	const struct of_device_id *match;
	const struct dra7xx_pcie_of_data *data;
	enum dw_pcie_device_mode mode;

	match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
	if (!match)
		return -EINVAL;

	data = (struct dra7xx_pcie_of_data *)match->data;
	mode = (enum dw_pcie_device_mode)data->mode;
602 603 604 605 606

	dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
	if (!dra7xx)
		return -ENOMEM;

607 608 609 610 611 612 613 614
	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	pp = &pci->pp;
615 616
	pp->ops = &dra7xx_pcie_host_ops;

617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "missing IRQ resource\n");
		return -EINVAL;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
	base = devm_ioremap_nocache(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 0) {
		dev_err(dev, "unable to find the strings\n");
		return phy_count;
	}

	phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	for (i = 0; i < phy_count; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_get(dev, name);
		if (IS_ERR(phy[i]))
			return PTR_ERR(phy[i]);
	}

	dra7xx->base = base;
	dra7xx->phy = phy;
647
	dra7xx->pci = pci;
648 649
	dra7xx->phy_count = phy_count;

650 651 652 653 654 655
	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

656 657
	platform_set_drvdata(pdev, dra7xx);

658 659
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
660
	if (ret < 0) {
661
		dev_err(dev, "pm_runtime_get_sync failed\n");
662
		goto err_get_sync;
663 664
	}

665 666 667 668
	reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
	if (IS_ERR(reset)) {
		ret = PTR_ERR(reset);
		dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
669
		goto err_gpio;
670 671 672 673 674 675
	}

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

676 677 678 679
	dra7xx->link_gen = of_pci_get_max_link_speed(np);
	if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
		dra7xx->link_gen = 2;

680 681 682 683 684 685 686 687 688 689 690
	switch (mode) {
	case DW_PCIE_RC_TYPE:
		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_RC);
		ret = dra7xx_add_pcie_port(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	case DW_PCIE_EP_TYPE:
		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_EP);
691 692 693 694 695

		ret = dra7xx_pcie_ep_unaligned_memaccess(dev);
		if (ret)
			goto err_gpio;

696 697 698 699 700 701 702 703
		ret = dra7xx_add_pcie_ep(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
	}
	dra7xx->mode = mode;
704

705 706 707 708 709 710 711
	ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
			       IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		goto err_gpio;
	}

712 713
	return 0;

714
err_gpio:
715
	pm_runtime_put(dev);
716 717

err_get_sync:
718
	pm_runtime_disable(dev);
719
	dra7xx_pcie_disable_phy(dra7xx);
720 721 722 723

	return ret;
}

#ifdef CONFIG_PM_SLEEP
725 726 727
static int dra7xx_pcie_suspend(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
728
	struct dw_pcie *pci = dra7xx->pci;
729 730
	u32 val;

731 732 733
	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

734
	/* clear MSE */
735
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
736
	val &= ~PCI_COMMAND_MEMORY;
737
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
738 739 740 741 742 743 744

	return 0;
}

static int dra7xx_pcie_resume(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
745
	struct dw_pcie *pci = dra7xx->pci;
746 747
	u32 val;

748 749 750
	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

751
	/* set MSE */
752
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
753
	val |= PCI_COMMAND_MEMORY;
754
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
755 756 757 758

	return 0;
}

759 760 761 762
static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);

763
	dra7xx_pcie_disable_phy(dra7xx);
764 765 766 767 768 769 770 771 772

	return 0;
}

static int dra7xx_pcie_resume_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

773 774 775 776
	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
777 778 779 780 781 782 783
	}

	return 0;
}
#endif

static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
784
	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
785 786 787 788
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
				      dra7xx_pcie_resume_noirq)
};

789 790 791 792
static struct platform_driver dra7xx_pcie_driver = {
	.driver = {
		.name	= "dra7-pcie",
		.of_match_table = of_dra7xx_pcie_match,
793
		.suppress_bind_attrs = true,
794
		.pm	= &dra7xx_pcie_pm_ops,
795 796
	},
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);