/* pci-dra7xx.c */
1
// SPDX-License-Identifier: GPL-2.0
2 3 4
/*
 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
 *
5
 * Copyright (C) 2013-2014 Texas Instruments Incorporated - https://www.ti.com
6 7 8 9
 *
 * Authors: Kishon Vijay Abraham I <kishon@ti.com>
 */

10
#include <linux/delay.h>
11
#include <linux/device.h>
12 13 14 15 16
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
17
#include <linux/init.h>
18
#include <linux/of_device.h>
19
#include <linux/of_gpio.h>
20
#include <linux/of_pci.h>
21 22 23 24 25 26
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/resource.h>
#include <linux/types.h>
27 28
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
29
#include <linux/gpio/consumer.h>
30

31
#include "../../pci.h"
32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63
#include "pcie-designware.h"

/* PCIe controller wrapper DRA7XX configuration registers */

#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN		0x0024
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN		0x0028
#define	ERR_SYS						BIT(0)
#define	ERR_FATAL					BIT(1)
#define	ERR_NONFATAL					BIT(2)
#define	ERR_COR						BIT(3)
#define	ERR_AXI						BIT(4)
#define	ERR_ECRC					BIT(5)
#define	PME_TURN_OFF					BIT(8)
#define	PME_TO_ACK					BIT(9)
#define	PM_PME						BIT(10)
#define	LINK_REQ_RST					BIT(11)
#define	LINK_UP_EVT					BIT(12)
#define	CFG_BME_EVT					BIT(13)
#define	CFG_MSE_EVT					BIT(14)
#define	INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
			ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
			LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)

#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI		0x0034
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI		0x0038
#define	INTA						BIT(0)
#define	INTB						BIT(1)
#define	INTC						BIT(2)
#define	INTD						BIT(3)
#define	MSI						BIT(4)
#define	LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)

64 65 66 67 68
#define	PCIECTRL_TI_CONF_DEVICE_TYPE			0x0100
#define	DEVICE_TYPE_EP					0x0
#define	DEVICE_TYPE_LEG_EP				0x1
#define	DEVICE_TYPE_RC					0x4

69 70 71 72 73
#define	PCIECTRL_DRA7XX_CONF_DEVICE_CMD			0x0104
#define	LTSSM_EN					0x1

#define	PCIECTRL_DRA7XX_CONF_PHY_CS			0x010C
#define	LINK_UP						BIT(16)
74
#define	DRA7XX_CPU_TO_BUS_ADDR				0x0FFFFFFF
75

76 77 78 79 80 81 82
#define	PCIECTRL_TI_CONF_INTX_ASSERT			0x0124
#define	PCIECTRL_TI_CONF_INTX_DEASSERT			0x0128

#define	PCIECTRL_TI_CONF_MSI_XMT			0x012c
#define MSI_REQ_GRANT					BIT(0)
#define MSI_VECTOR_SHIFT				7

83 84 85 86
#define PCIE_1LANE_2LANE_SELECTION			BIT(13)
#define PCIE_B1C0_MODE_SEL				BIT(2)
#define PCIE_B0_B1_TSYNCEN				BIT(0)

87
/* Per-device state for one DRA7xx PCIe controller instance. */
struct dra7xx_pcie {
	struct dw_pcie		*pci;		/* DesignWare core state */
	void __iomem		*base;		/* DT ti_conf */
	int			phy_count;	/* DT phy-names count */
	struct phy		**phy;		/* one PHY per lane */
	struct irq_domain	*irq_domain;	/* legacy INTx domain */
	enum dw_pcie_device_mode mode;		/* RC or EP, from match data */
};

/* Compatible-specific configuration attached to the OF match table. */
struct dra7xx_pcie_of_data {
	enum dw_pcie_device_mode mode;
	/* Mask for the B1C0 lane-mux field in the lane-sel syscon register */
	u32 b1co_mode_sel_mask;
};

101
/* Recover the wrapper state from a struct dw_pcie (drvdata is set in probe). */
#define to_dra7xx_pcie(x)	dev_get_drvdata((x)->dev)
102 103 104 105 106 107 108 109 110 111 112 113

/* Read a 32-bit register in the ti_conf wrapper register space. */
static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
{
	return readl(pcie->base + offset);
}

/* Write a 32-bit register in the ti_conf wrapper register space. */
static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
				      u32 value)
{
	writel(value, pcie->base + offset);
}

114
/*
 * Translate a CPU address into the bus address programmed into the ATU:
 * the wrapper only decodes the low 28 bits of the outbound window.
 */
static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
{
	return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
}

119
/* Return 1 when the wrapper's PHY status register reports the link up. */
static int dra7xx_pcie_link_up(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);

	return !!(reg & LINK_UP);
}

127
/* Halt link training by clearing LTSSM_EN in the wrapper command register. */
static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
}

/*
 * Start link training by setting LTSSM_EN. Returns 0 in all cases; the
 * already-up condition is only logged. Waiting for the link to actually
 * come up is the caller's job (dw_pcie_wait_for_link()).
 */
static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device *dev = pci->dev;
	u32 reg;

	if (dw_pcie_link_up(pci)) {
		dev_err(dev, "link is already up\n");
		return 0;
	}

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg |= LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	return 0;
}

155
/*
 * Clear any stale MSI/INTx status, then unmask MSI and legacy INTA-INTD
 * in the wrapper (status registers are write-1-to-clear).
 */
static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
			   LEG_EP_INTERRUPTS | MSI);

	dra7xx_pcie_writel(dra7xx,
			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
			   MSI | LEG_EP_INTERRUPTS);
}

165 166 167
/* Clear then unmask all main wrapper events (errors, PM, link changes). */
static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
			   INTERRUPTS);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
			   INTERRUPTS);
}

/* Enable both the main wrapper events and the MSI/INTx sources (RC mode). */
static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
	dra7xx_pcie_enable_msi_interrupts(dra7xx);
}

179
/*
 * DesignWare host_init callback: program the RC, start link training,
 * wait (best effort) for the link, then set up MSI and wrapper IRQs.
 */
static int dra7xx_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	dw_pcie_setup_rc(pp);

	dra7xx_pcie_establish_link(pci);
	/* Return value ignored: bring-up continues even without a link. */
	dw_pcie_wait_for_link(pci);
	dw_pcie_msi_init(pp);
	dra7xx_pcie_enable_interrupts(dra7xx);

	return 0;
}

/* irq_domain map callback for the legacy INTx domain. */
static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

/* Domain ops for INTA-INTD; standard PCI INTx DT translation. */
static const struct irq_domain_ops intx_domain_ops = {
	.map = dra7xx_pcie_intx_map,
	.xlate = pci_irqd_intx_xlate,
};

208
static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
209
{
210
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
211 212
	unsigned long val;
	int pos, irq;
213

214 215 216 217
	val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
				   (index * MSI_REG_CTRL_BLOCK_SIZE));
	if (!val)
		return 0;
218

219 220 221 222 223 224 225
	pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0);
	while (pos != MAX_MSI_IRQS_PER_CTRL) {
		irq = irq_find_mapping(pp->irq_domain,
				       (index * MAX_MSI_IRQS_PER_CTRL) + pos);
		generic_handle_irq(irq);
		pos++;
		pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos);
226 227
	}

228
	return 1;
229 230
}

231
/* Drain pending MSIs across all controller blocks, bounded to avoid floods. */
static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret, i, count, num_ctrls;

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	/*
	 * Need to make sure all MSI status bits read 0 before exiting.
	 * Else, new MSI IRQs are not registered by the wrapper. Have an
	 * upperbound for the loop and exit the IRQ in case of IRQ flood
	 * to avoid locking up system in interrupt context.
	 */
	count = 0;
	do {
		ret = 0;

		for (i = 0; i < num_ctrls; i++)
			ret |= dra7xx_pcie_handle_msi(pp, i);
		count++;
	} while (ret && count <= 1000);

	if (count > 1000)
		dev_warn_ratelimited(pci->dev,
				     "Too many MSI IRQs to handle\n");
}

/*
 * Chained handler for the muxed MSI/INTx wrapper interrupt: ack the
 * wrapper status, then dispatch to the MSI path or the INTx domain.
 */
static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct dra7xx_pcie *dra7xx;
	struct dw_pcie *pci;
	struct pcie_port *pp;
	unsigned long reg;
	u32 virq, bit;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	pci = to_dw_pcie_from_pp(pp);
	dra7xx = to_dra7xx_pcie(pci);

	/* Read and ack the wrapper status (write-1-to-clear). */
	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);

	/*
	 * NOTE(review): the switch matches single-bit values only; a status
	 * with several bits set (e.g. MSI | INTA) falls through unhandled
	 * here and relies on the next interrupt - confirm against hardware
	 * behaviour before changing.
	 */
	switch (reg) {
	case MSI:
		dra7xx_pcie_handle_msi_irq(pp);
		break;
	case INTA:
	case INTB:
	case INTC:
	case INTD:
		for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
			virq = irq_find_mapping(dra7xx->irq_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
		break;
	}

	chained_irq_exit(chip, desc);
}

/*
 * Threadless handler for the main wrapper interrupt (errors, PM events,
 * link state). All events are only logged at debug level, except
 * LINK_UP_EVT which additionally notifies the EP core in EP mode.
 * Status is acknowledged at the end (write-1-to-clear).
 */
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct device *dev = pci->dev;
	struct dw_pcie_ep *ep = &pci->ep;
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);

	if (reg & ERR_SYS)
		dev_dbg(dev, "System Error\n");

	if (reg & ERR_FATAL)
		dev_dbg(dev, "Fatal Error\n");

	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");

	if (reg & ERR_COR)
		dev_dbg(dev, "Correctable Error\n");

	if (reg & ERR_AXI)
		dev_dbg(dev, "AXI tag lookup fatal Error\n");

	if (reg & ERR_ECRC)
		dev_dbg(dev, "ECRC Error\n");

	if (reg & PME_TURN_OFF)
		dev_dbg(dev,
			"Power Management Event Turn-Off message received\n");

	if (reg & PME_TO_ACK)
		dev_dbg(dev,
			"Power Management Turn-Off Ack message received\n");

	if (reg & PM_PME)
		dev_dbg(dev, "PM Power Management Event message received\n");

	if (reg & LINK_REQ_RST)
		dev_dbg(dev, "Link Request Reset\n");

	if (reg & LINK_UP_EVT) {
		if (dra7xx->mode == DW_PCIE_EP_TYPE)
			dw_pcie_ep_linkup(ep);
		dev_dbg(dev, "Link-up state change\n");
	}

	if (reg & CFG_BME_EVT)
		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");

	if (reg & CFG_MSE_EVT)
		dev_dbg(dev, "CFG 'Memory Space Enable' change\n");

	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);

	return IRQ_HANDLED;
}

354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383
/*
 * Create the legacy INTx irq_domain from the first child node (the
 * interrupt controller node) and install the chained MSI/INTx handler
 * on the muxed interrupt. Returns 0 or -ENODEV.
 */
static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node =  of_get_next_child(node, NULL);

	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
					 pp);
	dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						   &intx_domain_ops, pp);
	of_node_put(pcie_intc_node);
	if (!dra7xx->irq_domain) {
		dev_err(dev, "Failed to get a INTx IRQ domain\n");
		return -ENODEV;
	}

	return 0;
}

/* DesignWare host callbacks for RC mode. */
static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
	.host_init = dra7xx_pcie_host_init,
};

384 385 386 387
/* EP-mode init: clear all BARs and enable the main wrapper events. */
static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	enum pci_barno bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
		dw_pcie_ep_reset_bar(pci, bar);

	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}

/*
 * Pulse a legacy INTx toward the host: assert, hold ~1ms, deassert via
 * the wrapper's INTX assert/deassert registers.
 */
static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
	mdelay(1);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
}

/*
 * Transmit an MSI toward the host: program the (0-based) vector number
 * and set the request/grant bit in the MSI transmit register.
 */
static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
				      u8 interrupt_num)
{
	u32 reg;

	reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
	reg |= MSI_REQ_GRANT;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
}

413
/*
 * EP raise_irq callback: dispatch legacy or MSI interrupt generation.
 * Unknown types are only logged; the function still returns 0.
 */
static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				 enum pci_epc_irq_type type, u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		dra7xx_pcie_raise_legacy_irq(dra7xx);
		break;
	case PCI_EPC_IRQ_MSI:
		dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
		break;
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
	}

	return 0;
}

433 434 435 436 437 438 439 440 441 442 443 444
/* EPC capabilities: link-up notification and MSI supported, MSI-X not. */
static const struct pci_epc_features dra7xx_pcie_epc_features = {
	.linkup_notifier = true,
	.msi_capable = true,
	.msix_capable = false,
};

/* EP get_features callback returning the static capability table. */
static const struct pci_epc_features*
dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
{
	return &dra7xx_pcie_epc_features;
}

445
/* DesignWare endpoint callbacks for EP mode. */
static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.ep_init = dra7xx_pcie_ep_init,
	.raise_irq = dra7xx_pcie_raise_irq,
	.get_features = dra7xx_pcie_get_features,
};

/*
 * Map the EP DBI register spaces ("ep_dbics"/"ep_dbics2") and register
 * the device with the DesignWare endpoint core. Returns 0 or a negative
 * errno.
 */
static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
				     struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = dra7xx->pci;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "ep_dbics");
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	pci->dbi_base2 =
		devm_platform_ioremap_resource_byname(pdev, "ep_dbics2");
	if (IS_ERR(pci->dbi_base2))
		return PTR_ERR(pci->dbi_base2);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}

	return 0;
}

480 481
/*
 * Set up RC mode: grab the muxed MSI/INTx interrupt (index 1), create
 * the INTx domain, map "rc_dbics" and hand off to the DesignWare host
 * core. Returns 0 or a negative errno.
 */
static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
				       struct platform_device *pdev)
{
	int ret;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;

	pp->irq = platform_get_irq(pdev, 1);
	if (pp->irq < 0)
		return pp->irq;

	/* MSI IRQ is muxed */
	pp->msi_irq = -ENODEV;

	ret = dra7xx_pcie_init_irq_domain(pp);
	if (ret < 0)
		return ret;

	pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc_dbics");
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	pp->ops = &dra7xx_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

514
/* DesignWare core callbacks shared by RC and EP modes. */
static const struct dw_pcie_ops dw_pcie_ops = {
	.cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
	.start_link = dra7xx_pcie_establish_link,
	.stop_link = dra7xx_pcie_stop_link,
	.link_up = dra7xx_pcie_link_up,
};

521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537
/* Power off and tear down every PHY, in reverse order of bring-up. */
static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
{
	int i;

	for (i = dra7xx->phy_count - 1; i >= 0; i--) {
		phy_power_off(dra7xx->phy[i]);
		phy_exit(dra7xx->phy[i]);
	}
}

/*
 * Bring up every PHY: set PCIe mode, init, then power on. On any
 * failure, already-enabled PHYs are unwound in reverse order and the
 * error is returned.
 */
static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;
	int ret;
	int i;

	for (i = 0; i < phy_count; i++) {
		ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE);
		if (ret < 0)
			goto err_phy;

		ret = phy_init(dra7xx->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(dra7xx->phy[i]);
		if (ret < 0) {
			/* This PHY was init'ed but not powered: exit it too. */
			phy_exit(dra7xx->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(dra7xx->phy[i]);
		phy_exit(dra7xx->phy[i]);
	}

	return ret;
}

564 565 566 567 568 569 570 571
/* Generic DRA7 RC/EP match data (no two-lane mux configuration). */
static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

/* DRA74x/DRA72x variants carry the lane-mux mask for x2 operation. */
static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = {
	.b1co_mode_sel_mask = BIT(2),
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = {
	.b1co_mode_sel_mask = GENMASK(3, 2),
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = {
	.b1co_mode_sel_mask = BIT(2),
	.mode = DW_PCIE_EP_TYPE,
};

static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = {
	.b1co_mode_sel_mask = GENMASK(3, 2),
	.mode = DW_PCIE_EP_TYPE,
};

592 593 594 595 596 597 598 599 600
/* OF match table binding each compatible to its mode/lane-mux data. */
static const struct of_device_id of_dra7xx_pcie_match[] = {
	{
		.compatible = "ti,dra7-pcie",
		.data = &dra7xx_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra7-pcie-ep",
		.data = &dra7xx_pcie_ep_of_data,
	},
	{
		.compatible = "ti,dra746-pcie-rc",
		.data = &dra746_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra726-pcie-rc",
		.data = &dra726_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra746-pcie-ep",
		.data = &dra746_pcie_ep_of_data,
	},
	{
		.compatible = "ti,dra726-pcie-ep",
		.data = &dra726_pcie_ep_of_data,
	},
	{},
};

620
/*
 * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
 * @dev: device whose DT node carries the "ti,syscon-unaligned-access"
 *	 phandle (reg offset and bit mask as the two fixed args)
 *
 * Access to the PCIe slave port that are not 32-bit aligned will result
 * in incorrect mapping to TLP Address and Byte enable fields. Therefore,
 * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or
 * 0x3.
 *
 * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
 *
 * Returns 0 on success or a negative errno if the syscon is missing or
 * the register update fails.
 */
static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
	int ret;
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	struct regmap *regmap;

	regmap = syscon_regmap_lookup_by_phandle(np,
						 "ti,syscon-unaligned-access");
	if (IS_ERR(regmap)) {
		dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
		return -EINVAL;
	}

	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
					       2, 0, &args);
	if (ret) {
		dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
		return ret;
	}

	/* args.args[0] = register offset, args.args[1] = enable bit mask. */
	ret = regmap_update_bits(regmap, args.args[0], args.args[1],
				 args.args[1]);
	if (ret)
		dev_err(dev, "failed to enable unaligned access\n");

	of_node_put(args.np);

	return ret;
}

662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689
/*
 * Configure the SoC lane mux for two-lane (x2) operation via the
 * "ti,syscon-lane-sel" syscon: select B1C0 mode and enable B0/B1 time
 * synchronization. Returns 0 or -EINVAL if the syscon is unavailable.
 */
static int dra7xx_pcie_configure_two_lane(struct device *dev,
					  u32 b1co_mode_sel_mask)
{
	struct device_node *np = dev->of_node;
	struct regmap *pcie_syscon;
	unsigned int pcie_reg;
	u32 mask;
	u32 val;

	pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
	if (IS_ERR(pcie_syscon)) {
		dev_err(dev, "unable to get ti,syscon-lane-sel\n");
		return -EINVAL;
	}

	if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
				       &pcie_reg)) {
		dev_err(dev, "couldn't get lane selection reg offset\n");
		return -EINVAL;
	}

	mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
	val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
	regmap_update_bits(pcie_syscon, pcie_reg, mask, val);

	return 0;
}

690 691 692 693 694 695 696 697
/*
 * Probe: resolve the compatible-specific mode and lane-mux data, map the
 * ti_conf space, acquire and power the PHYs (with device links to enforce
 * suspend/resume ordering), then register as either an RC or an EP with
 * the DesignWare core and install the main wrapper IRQ handler.
 */
static int __init dra7xx_pcie_probe(struct platform_device *pdev)
{
	u32 reg;
	int ret;
	int irq;
	int i;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	void __iomem *base;
	struct dw_pcie *pci;
	struct dra7xx_pcie *dra7xx;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	char name[10];
	struct gpio_desc *reset;
	const struct of_device_id *match;
	const struct dra7xx_pcie_of_data *data;
	enum dw_pcie_device_mode mode;
	u32 b1co_mode_sel_mask;

	match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
	if (!match)
		return -EINVAL;

	data = (struct dra7xx_pcie_of_data *)match->data;
	mode = (enum dw_pcie_device_mode)data->mode;
	b1co_mode_sel_mask = data->b1co_mode_sel_mask;

	dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
	if (!dra7xx)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	base = devm_platform_ioremap_resource_byname(pdev, "ti_conf");
	if (IS_ERR(base))
		return PTR_ERR(base);

	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 0) {
		dev_err(dev, "unable to find the strings\n");
		return phy_count;
	}

	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < phy_count; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_get(dev, name);
		if (IS_ERR(phy[i]))
			return PTR_ERR(phy[i]);

		/* Device link makes the PHY resume before this controller. */
		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			ret = -EINVAL;
			goto err_link;
		}
	}

	dra7xx->base = base;
	dra7xx->phy = phy;
	dra7xx->pci = pci;
	dra7xx->phy_count = phy_count;

	if (phy_count == 2) {
		ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask);
		if (ret < 0)
			dra7xx->phy_count = 1; /* Fallback to x1 lane mode */
	}

	/*
	 * NOTE(review): this early return leaves the device links added
	 * above in place (they are devm/stateless); confirm whether
	 * err_link cleanup is wanted here.
	 */
	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	platform_set_drvdata(pdev, dra7xx);

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_get_sync;
	}

	/* Optional reset GPIO, driven high to release the device from reset. */
	reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
	if (IS_ERR(reset)) {
		ret = PTR_ERR(reset);
		dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
		goto err_gpio;
	}

	/* Make sure link training is halted before mode configuration. */
	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
			ret = -ENODEV;
			goto err_gpio;
		}

		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_RC);

		/* i870 workaround failure is non-fatal in RC mode. */
		ret = dra7xx_pcie_unaligned_memaccess(dev);
		if (ret)
			dev_err(dev, "WA for Errata i870 not applied\n");

		ret = dra7xx_add_pcie_port(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	case DW_PCIE_EP_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) {
			ret = -ENODEV;
			goto err_gpio;
		}

		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_EP);

		ret = dra7xx_pcie_unaligned_memaccess(dev);
		if (ret)
			goto err_gpio;

		ret = dra7xx_add_pcie_ep(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	default:
		/*
		 * NOTE(review): probe still returns 0 after this error;
		 * unreachable with the current match data, but confirm
		 * intent before relying on it.
		 */
		dev_err(dev, "INVALID device type %d\n", mode);
	}
	dra7xx->mode = mode;

	ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
			       IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		goto err_gpio;
	}

	return 0;

err_gpio:
err_get_sync:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);

err_link:
	while (--i >= 0)
		device_link_del(link[i]);

	return ret;
}

864
#ifdef CONFIG_PM_SLEEP
865 866 867
/* System suspend: clear Memory Space Enable in the RC's command register. */
static int dra7xx_pcie_suspend(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	/* Only RC mode touches its own config space here. */
	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* clear MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= ~PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

/* System resume: restore Memory Space Enable cleared during suspend. */
static int dra7xx_pcie_resume(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* set MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val |= PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

899 900 901 902
/* Late (noirq) suspend: power down the PHYs after all device IRQ work. */
static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);

	dra7xx_pcie_disable_phy(dra7xx);

	return 0;
}

/* Early (noirq) resume: bring the PHYs back before normal resume runs. */
static int dra7xx_pcie_resume_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	return 0;
}
#endif

923
/* Shutdown: stop the link, drop the runtime PM ref, power down the PHYs. */
static void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	dra7xx_pcie_stop_link(dra7xx->pci);

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		dev_dbg(dev, "pm_runtime_put_sync failed\n");

	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);
}

939
/* Sleep PM: MSE toggling in normal phase, PHY power in noirq phase. */
static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
				      dra7xx_pcie_resume_noirq)
};

945 946 947 948
/*
 * Built-in only (no module unload path); bind attributes suppressed
 * since the probe is __init and cannot run after boot.
 */
static struct platform_driver dra7xx_pcie_driver = {
	.driver = {
		.name	= "dra7-pcie",
		.of_match_table = of_dra7xx_pcie_match,
		.suppress_bind_attrs = true,
		.pm	= &dra7xx_pcie_pm_ops,
	},
	.shutdown = dra7xx_pcie_shutdown,
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);