// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 *		https://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include "pcie-designware.h"

#define IMX8MQ_GPR_PCIE_REF_USE_PAD		BIT(9)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN	BIT(10)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE	BIT(11)
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE	GENMASK(11, 8)
#define IMX8MQ_PCIE2_BASE_ADDR			0x33c00000

#define to_imx6_pcie(x)	dev_get_drvdata((x)->dev)

enum imx6_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX7D,
	IMX8MQ,
};

#define IMX6_PCIE_FLAG_IMX6_PHY			BIT(0)
#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE	BIT(1)
#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND		BIT(2)

struct imx6_pcie_drvdata {
	enum imx6_pcie_variants variant;
	u32 flags;
	int dbi_length;
};

struct imx6_pcie {
	struct dw_pcie		*pci;
	int			reset_gpio;
	bool			gpio_active_high;
	struct clk		*pcie_bus;
	struct clk		*pcie_phy;
	struct clk		*pcie_inbound_axi;
	struct clk		*pcie;
	struct clk		*pcie_aux;
	struct regmap		*iomuxc_gpr;
	u32			controller_id;
	struct reset_control	*pciephy_reset;
	struct reset_control	*apps_reset;
	struct reset_control	*turnoff_reset;
	u32			tx_deemph_gen1;
	u32			tx_deemph_gen2_3p5db;
	u32			tx_deemph_gen2_6db;
	u32			tx_swing_full;
	u32			tx_swing_low;
	int			link_gen;
	struct regulator	*vpcie;
	void __iomem		*phy_base;

	/* power domain for pcie */
	struct device		*pd_pcie;
	/* power domain for pcie phy */
	struct device		*pd_pcie_phy;
	const struct imx6_pcie_drvdata *drvdata;
};

/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200
#define PHY_PLL_LOCK_WAIT_TIMEOUT	(2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)

/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_IMX6_MSI_CAP			0x50
#define PCIE_RC_LCR				0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf

#define PCIE_RC_LCSR				0x80

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700

#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA(x)		FIELD_PREP(GENMASK(15, 0), (x))
#define PCIE_PHY_CTRL_CAP_ADR		BIT(16)
#define PCIE_PHY_CTRL_CAP_DAT		BIT(17)
#define PCIE_PHY_CTRL_WR		BIT(18)
#define PCIE_PHY_CTRL_RD		BIT(19)

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK		BIT(16)

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD			0x10
#define  PCIE_PHY_ATEOVRD_EN			BIT(2)
#define  PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT	0
#define  PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK	0x1

#define PCIE_PHY_MPLL_OVRD_IN_LO		0x11
#define  PCIE_PHY_MPLL_MULTIPLIER_SHIFT		2
#define  PCIE_PHY_MPLL_MULTIPLIER_MASK		0x7f
#define  PCIE_PHY_MPLL_MULTIPLIER_OVRD		BIT(9)

#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)

/* iMX7 PCIe PHY registers */
#define PCIE_PHY_CMN_REG4		0x14
/* These are probably the bits that *aren't* DCC_FB_EN */
#define PCIE_PHY_CMN_REG4_DCC_FB_EN	0x29

#define PCIE_PHY_CMN_REG15		0x54
#define PCIE_PHY_CMN_REG15_DLY_4	BIT(2)
#define PCIE_PHY_CMN_REG15_PLL_PD	BIT(5)
#define PCIE_PHY_CMN_REG15_OVRD_PLL_PD	BIT(7)

#define PCIE_PHY_CMN_REG24		0x90
#define PCIE_PHY_CMN_REG24_RX_EQ	BIT(6)
#define PCIE_PHY_CMN_REG24_RX_EQ_SEL	BIT(3)

#define PCIE_PHY_CMN_REG26		0x98
#define PCIE_PHY_CMN_REG26_ATT_MODE	0xBC

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN		BIT(5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN		BIT(3)

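/* Poll the PHY control bus until the ACK status bit matches exp_val (~10us max). */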
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	bool val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
			PCIE_PHY_STAT_ACK;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val;
	int ret;

	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	val |= PCIE_PHY_CTRL_CAP_ADR;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	return pcie_phy_poll_ack(imx6_pcie, false);
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = PCIE_PHY_CTRL_RD;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);

	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	*data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);

	/* deassert Read signal */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);

	return pcie_phy_poll_ack(imx6_pcie, false);
}

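/* Write to the 16-bit PCIe PHY control registers (not memory-mapped) */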
static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 var;
	int ret;

	/* write the address and latch it (cap addr) */
	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= PCIE_PHY_CTRL_CAP_DAT;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	/* deassert cap data */
	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, false);
	if (ret)
		return ret;

	/* assert wr signal */
	var = PCIE_PHY_CTRL_WR;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, false);
	if (ret)
		return ret;

	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);

	return 0;
}

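/* Reset the i.MX6 PHY RX path by briefly forcing the RX_DATA_EN/RX_PLL_EN overrides (no-op on variants without the i.MX6 PHY). */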
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
	u16 tmp;

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
		return;

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);

	usleep_range(2000, 3000);

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		  PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
}

#ifdef CONFIG_ARM
/*  Added for PCI abort handling */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);
	unsigned long instr = *(unsigned long *)pc;
	int reg = (instr >> 12) & 15;

	/*
	 * If the instruction being executed was a read,
	 * make it look like it read all-ones.
	 */
	if ((instr & 0x0c100000) == 0x04100000) {
		unsigned long val;

		if (instr & 0x00400000)
			val = 255;
		else
			val = -1;

		regs->uregs[reg] = val;
		regs->ARM_pc += 4;
		return 0;
	}

	if ((instr & 0x0e100090) == 0x00100090) {
		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
		return 0;
	}

	return 1;
}
#endif

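/*
 * Attach to the "pcie" and "pcie_phy" power domains and link them to this
 * device so they stay powered while it is active; a no-op when the device
 * sits in a single power domain.
 */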
static int imx6_pcie_attach_pd(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
	struct device_link *link;

	/* Do nothing when in a single power domain */
	if (dev->pm_domain)
		return 0;

	imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
	if (IS_ERR(imx6_pcie->pd_pcie))
		return PTR_ERR(imx6_pcie->pd_pcie);
	/* Do nothing when power domain missing */
	if (!imx6_pcie->pd_pcie)
		return 0;
	link = device_link_add(dev, imx6_pcie->pd_pcie,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie pd.\n");
		return -EINVAL;
	}

	imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pd_pcie_phy))
		return PTR_ERR(imx6_pcie->pd_pcie_phy);

	link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
		return -EINVAL;
	}

	return 0;
}

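/* Put the PCIe core/PHY into reset (variant specific) and switch off the vpcie supply. */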
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;

	switch (imx6_pcie->drvdata->variant) {
	case IMX7D:
	case IMX8MQ:
		reset_control_assert(imx6_pcie->pciephy_reset);
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
		/* Force PCIe PHY reset */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST,
				   IMX6Q_GPR1_PCIE_SW_RST);
		break;
	case IMX6Q:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
		break;
	}

	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		int ret = regulator_disable(imx6_pcie->vpcie);

		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}

static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
{
	WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ);
	return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}

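/* Enable the variant-specific reference clock(s) feeding the PCIe PHY. */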
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	unsigned int offset;
	int ret = 0;

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
		ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
		if (ret) {
			dev_err(dev, "unable to enable pcie_axi clock\n");
			break;
		}

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
		break;
	case IMX6QP:
	case IMX6Q:
		/* power up core phy and enable ref clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
		/*
		 * The async reset input needs the ref clock to sync
		 * internally.  If the ref clock comes up only after reset,
		 * the internally synced reset time is too short to meet
		 * the requirement, so add a ~10us delay here.
		 */
		usleep_range(10, 100);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
		break;
	case IMX7D:
		break;
	case IMX8MQ:
		ret = clk_prepare_enable(imx6_pcie->pcie_aux);
		if (ret) {
			dev_err(dev, "unable to enable pcie_aux clock\n");
			break;
		}

		offset = imx6_pcie_grp_offset(imx6_pcie);
		/*
		 * Clear the CLK_REQ override value and enable the override,
		 * so that REF_CLK is kept turned on.
		 */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
				   0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
		break;
	}

	return ret;
}

static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
{
	u32 val;
	struct device *dev = imx6_pcie->pci->dev;

	if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
				     IOMUXC_GPR22, val,
				     val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
				     PHY_PLL_LOCK_WAIT_USLEEP_MAX,
				     PHY_PLL_LOCK_WAIT_TIMEOUT))
		dev_err(dev, "PCIe PLL lock timeout\n");
}

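/*
 * Power up the core: enable the vpcie supply and the PCIe clocks, toggle the
 * board reset GPIO if one is present, then release the variant-specific resets.
 */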
static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
		ret = regulator_enable(imx6_pcie->vpcie);
		if (ret) {
			dev_err(dev, "failed to enable vpcie regulator: %d\n",
				ret);
			return;
		}
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
	if (ret) {
		dev_err(dev, "unable to enable pcie_phy clock\n");
		goto err_pcie_phy;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
	if (ret) {
		dev_err(dev, "unable to enable pcie_bus clock\n");
		goto err_pcie_bus;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie clock\n");
		goto err_pcie;
	}

	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie ref clock\n");
		goto err_ref_clk;
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);

	/* Some boards don't have PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					imx6_pcie->gpio_active_high);
		msleep(100);
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					!imx6_pcie->gpio_active_high);
	}

	switch (imx6_pcie->drvdata->variant) {
	case IMX8MQ:
		reset_control_deassert(imx6_pcie->pciephy_reset);
		break;
	case IMX7D:
		reset_control_deassert(imx6_pcie->pciephy_reset);

		/* Workaround for ERR010728, failure of PCI-e PLL VCO to
		 * oscillate, especially when cold.  This turns off "Duty-cycle
		 * Corrector" and other mysterious undocumented things.
		 */
		if (likely(imx6_pcie->phy_base)) {
			/* De-assert DCC_FB_EN */
			writel(PCIE_PHY_CMN_REG4_DCC_FB_EN,
			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG4);
			/* Assert RX_EQS and RX_EQS_SEL */
			writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL
				| PCIE_PHY_CMN_REG24_RX_EQ,
			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG24);
			/* Assert ATT_MODE */
			writel(PCIE_PHY_CMN_REG26_ATT_MODE,
			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG26);
		} else {
			dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
		}

		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST, 0);

		usleep_range(200, 500);
		break;
	case IMX6Q:		/* Nothing to do */
		break;
	}

	return;

err_ref_clk:
	clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
	clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
	clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		ret = regulator_disable(imx6_pcie->vpcie);
		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}

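/* Program the Root Complex device type into the IOMUXC GPR12 field for this controller. */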
static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
{
	unsigned int mask, val;

	if (imx6_pcie->drvdata->variant == IMX8MQ &&
	    imx6_pcie->controller_id == 1) {
		mask   = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
		val    = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
				    PCI_EXP_TYPE_ROOT_PORT);
	} else {
		mask = IMX6Q_GPR12_DEVICE_TYPE;
		val  = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
				  PCI_EXP_TYPE_ROOT_PORT);
	}

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
}

static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
	switch (imx6_pcie->drvdata->variant) {
	case IMX8MQ:
		/*
		 * TODO: Currently this code assumes external
		 * oscillator is being used
		 */
		regmap_update_bits(imx6_pcie->iomuxc_gpr,
				   imx6_pcie_grp_offset(imx6_pcie),
				   IMX8MQ_GPR_PCIE_REF_USE_PAD,
				   IMX8MQ_GPR_PCIE_REF_USE_PAD);
		break;
	case IMX7D:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
				   IMX6SX_GPR12_PCIE_RX_EQ_2);
		fallthrough;
	default:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

		/* configure constant input signal to the pcie ctrl and phy */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
				   imx6_pcie->tx_deemph_gen1 << 0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
				   imx6_pcie->tx_deemph_gen2_6db << 12);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_FULL,
				   imx6_pcie->tx_swing_full << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_LOW,
				   imx6_pcie->tx_swing_low << 25);
		break;
	}

	imx6_pcie_configure_type(imx6_pcie);
}

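/*
 * Reprogram the PHY MPLL multiplier and reference divider so that a 100MHz
 * or 200MHz reference clock still produces the MPLL rate the PHY expects
 * (the power-on defaults assume a 125MHz reference).
 */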
static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
{
	unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
	int mult, div;
	u16 val;

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
		return 0;

	switch (phy_rate) {
	case 125000000:
		/*
		 * The default settings of the MPLL are for a 125MHz input
		 * clock, so no need to reconfigure anything in that case.
		 */
		return 0;
	case 100000000:
		mult = 25;
		div = 0;
		break;
	case 200000000:
		mult = 25;
		div = 1;
		break;
	default:
		dev_err(imx6_pcie->pci->dev,
			"Unsupported PHY reference clock rate %lu\n", phy_rate);
		return -EINVAL;
	}

	pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
	pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);

	pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
	val |= PCIE_PHY_ATEOVRD_EN;
	pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);

	return 0;
}

static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		/* Test if the speed change finished. */
		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
			return 0;
		usleep_range(100, 1000);
	}

	dev_err(dev, "Speed change timeout\n");
	return -ETIMEDOUT;
}

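/* Start link training: set the LTSSM enable bit in GPR12 or release the apps reset. */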
static void imx6_pcie_ltssm_enable(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->drvdata->variant) {
	case IMX6Q:
	case IMX6SX:
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2,
				   IMX6Q_GPR12_PCIE_CTL_2);
		break;
	case IMX7D:
	case IMX8MQ:
		reset_control_deassert(imx6_pcie->apps_reset);
		break;
	}
}

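/*
 * Bring the link up at Gen1 first, then optionally retrain to Gen2 with a
 * directed speed change once the link partner has been detected.
 */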
static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link.  In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all.  This happens with PCIe switches.
	 */
	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
	dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

	/* Start LTSSM. */
	imx6_pcie_ltssm_enable(dev);

	ret = dw_pcie_wait_for_link(pci);
	if (ret)
		goto err_reset_phy;

	if (imx6_pcie->link_gen == 2) {
		/* Allow Gen2 mode after the link is up. */
		tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
		dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

		/*
		 * Start Directed Speed Change so the best possible
		 * speed both link partners support can be negotiated.
		 */
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		tmp |= PORT_LOGIC_SPEED_CHANGE;
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);

		if (imx6_pcie->drvdata->flags &
		    IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
			/*
			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
			 * from i.MX6 family when no link speed transition
			 * occurs and we go Gen1 -> yep, Gen1. The difference
			 * is that, in such case, it will not be cleared by HW
			 * which will cause the following code to report false
			 * failure.
			 */

			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
			if (ret) {
				dev_err(dev, "Failed to bring link up!\n");
				goto err_reset_phy;
			}
		}

		/* Make sure link training is finished as well! */
		ret = dw_pcie_wait_for_link(pci);
		if (ret) {
			dev_err(dev, "Failed to bring link up!\n");
			goto err_reset_phy;
		}
	} else {
		dev_info(dev, "Link: Gen2 disabled\n");
	}

	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
	dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
	return 0;

err_reset_phy:
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
	imx6_pcie_reset_phy(imx6_pcie);
	return ret;
}

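/* dw_pcie host_init hook: reset the core, set up the PHY and bring the link up. */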
static int imx6_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	imx6_setup_phy_mpll(imx6_pcie);
	dw_pcie_setup_rc(pp);
	imx6_pcie_establish_link(imx6_pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	return 0;
}

static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
	.host_init = imx6_pcie_host_init,
};

static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
			      struct platform_device *pdev)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0)
			return pp->msi_irq;
	}

	pp->ops = &imx6_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	/* No special ops needed, but pcie-designware still expects this struct */
};

#ifdef CONFIG_PM_SLEEP
static void imx6_pcie_ltssm_disable(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0);
		break;
	case IMX7D:
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	default:
		dev_err(dev, "ltssm_disable not supported\n");
	}
}

static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;

	/* Some variants have a turnoff reset in DT */
	if (imx6_pcie->turnoff_reset) {
		reset_control_assert(imx6_pcie->turnoff_reset);
		reset_control_deassert(imx6_pcie->turnoff_reset);
		goto pm_turnoff_sleep;
	}

	/* Others poke directly at IOMUXC registers */
	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				IMX6SX_GPR12_PCIE_PM_TURN_OFF,
				IMX6SX_GPR12_PCIE_PM_TURN_OFF);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
		break;
	default:
		dev_err(dev, "PME_Turn_Off not implemented\n");
		return;
	}

	/*
	 * Components with an upstream port must respond to
	 * PME_Turn_Off with PME_TO_Ack but we can't check.
	 *
	 * The standard recommends a 1-10ms timeout after which to
	 * proceed anyway as if acks were received.
	 */
pm_turnoff_sleep:
	usleep_range(1000, 10000);
}

static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
{
	clk_disable_unprepare(imx6_pcie->pcie);
	clk_disable_unprepare(imx6_pcie->pcie_phy);
	clk_disable_unprepare(imx6_pcie->pcie_bus);

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
		clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
		break;
	case IMX7D:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
		break;
	case IMX8MQ:
		clk_disable_unprepare(imx6_pcie->pcie_aux);
		break;
	default:
		break;
	}
}

static int imx6_pcie_suspend_noirq(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
		return 0;

	imx6_pcie_pm_turnoff(imx6_pcie);
	imx6_pcie_clk_disable(imx6_pcie);
	imx6_pcie_ltssm_disable(dev);

	return 0;
}

static int imx6_pcie_resume_noirq(struct device *dev)
{
	int ret;
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
	struct pcie_port *pp = &imx6_pcie->pci->pp;

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
		return 0;

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	dw_pcie_setup_rc(pp);

	ret = imx6_pcie_establish_link(imx6_pcie);
	if (ret < 0)
		dev_info(dev, "pcie link is down after resume.\n");

	return 0;
}
#endif

static const struct dev_pm_ops imx6_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
				      imx6_pcie_resume_noirq)
};

static int imx6_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct imx6_pcie *imx6_pcie;
	struct device_node *np;
	struct resource *dbi_base;
	struct device_node *node = dev->of_node;
	int ret;
	u16 val;

	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
	if (!imx6_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	imx6_pcie->pci = pci;
	imx6_pcie->drvdata = of_device_get_match_data(dev);

	/* Find the PHY if one is defined, only imx7d uses it */
	np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
	if (np) {
		struct resource res;

		ret = of_address_to_resource(np, 0, &res);
		if (ret) {
			dev_err(dev, "Unable to map PCIe PHY\n");
			return ret;
		}
		imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
		if (IS_ERR(imx6_pcie->phy_base)) {
			dev_err(dev, "Unable to map PCIe PHY\n");
			return PTR_ERR(imx6_pcie->phy_base);
		}
	}

	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/* Fetch GPIOs */
	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
	imx6_pcie->gpio_active_high = of_property_read_bool(node,
						"reset-gpio-active-high");
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
				imx6_pcie->gpio_active_high ?
					GPIOF_OUT_INIT_HIGH :
					GPIOF_OUT_INIT_LOW,
				"PCIe reset");
		if (ret) {
			dev_err(dev, "unable to get reset gpio\n");
			return ret;
		}
	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
		return imx6_pcie->reset_gpio;
	}

	/* Fetch clocks */
	imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pcie_phy)) {
		dev_err(dev, "pcie_phy clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_phy);
	}

	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(imx6_pcie->pcie_bus)) {
		dev_err(dev, "pcie_bus clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_bus);
	}

	imx6_pcie->pcie = devm_clk_get(dev, "pcie");
	if (IS_ERR(imx6_pcie->pcie)) {
		dev_err(dev, "pcie clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie);
	}

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
							   "pcie_inbound_axi");
		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
			dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
		}
		break;
	case IMX8MQ:
		imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
		if (IS_ERR(imx6_pcie->pcie_aux)) {
			dev_err(dev, "pcie_aux clock source missing or invalid\n");
			return PTR_ERR(imx6_pcie->pcie_aux);
		}
		fallthrough;
	case IMX7D:
		if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
			imx6_pcie->controller_id = 1;

		imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
									    "pciephy");
		if (IS_ERR(imx6_pcie->pciephy_reset)) {
			dev_err(dev, "Failed to get PCIEPHY reset control\n");
			return PTR_ERR(imx6_pcie->pciephy_reset);
		}

		imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
									 "apps");
		if (IS_ERR(imx6_pcie->apps_reset)) {
			dev_err(dev, "Failed to get PCIE APPS reset control\n");
			return PTR_ERR(imx6_pcie->apps_reset);
		}
		break;
	default:
		break;
	}

	/* Grab turnoff reset */
	imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
	if (IS_ERR(imx6_pcie->turnoff_reset)) {
		dev_err(dev, "Failed to get TURNOFF reset control\n");
		return PTR_ERR(imx6_pcie->turnoff_reset);
	}

	/* Grab GPR config register range */
	imx6_pcie->iomuxc_gpr =
		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
		dev_err(dev, "unable to find iomuxc registers\n");
		return PTR_ERR(imx6_pcie->iomuxc_gpr);
	}

	/* Grab PCIe PHY Tx Settings */
	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
				 &imx6_pcie->tx_deemph_gen1))
		imx6_pcie->tx_deemph_gen1 = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
				 &imx6_pcie->tx_deemph_gen2_3p5db))
		imx6_pcie->tx_deemph_gen2_3p5db = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
				 &imx6_pcie->tx_deemph_gen2_6db))
		imx6_pcie->tx_deemph_gen2_6db = 20;

	if (of_property_read_u32(node, "fsl,tx-swing-full",
				 &imx6_pcie->tx_swing_full))
		imx6_pcie->tx_swing_full = 127;

	if (of_property_read_u32(node, "fsl,tx-swing-low",
				 &imx6_pcie->tx_swing_low))
		imx6_pcie->tx_swing_low = 127;

	/* Limit link speed */
	ret = of_property_read_u32(node, "fsl,max-link-speed",
				   &imx6_pcie->link_gen);
	if (ret)
		imx6_pcie->link_gen = 1;

	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
	if (IS_ERR(imx6_pcie->vpcie)) {
		if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
			return PTR_ERR(imx6_pcie->vpcie);
		imx6_pcie->vpcie = NULL;
	}

	platform_set_drvdata(pdev, imx6_pcie);

	ret = imx6_pcie_attach_pd(dev);
	if (ret)
		return ret;

	ret = imx6_add_pcie_port(imx6_pcie, pdev);
	if (ret < 0)
		return ret;

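	/*
	 * Set the MSI Enable bit in the Root Complex's own MSI capability
	 * (at PCIE_RC_IMX6_MSI_CAP); this gates delivery of MSIs coming
	 * from downstream devices.
	 */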
	if (pci_msi_enabled()) {
		val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
					PCI_MSI_FLAGS);
		val |= PCI_MSI_FLAGS_ENABLE;
		dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
				   val);
	}

	return 0;
}

static void imx6_pcie_shutdown(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);

	/* bring down link, so bootloader gets clean state in case of reboot */
	imx6_pcie_assert_core_reset(imx6_pcie);
}

static const struct imx6_pcie_drvdata drvdata[] = {
	[IMX6Q] = {
		.variant = IMX6Q,
		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
		.dbi_length = 0x200,
	},
	[IMX6SX] = {
		.variant = IMX6SX,
		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
			 IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
	},
	[IMX6QP] = {
		.variant = IMX6QP,
		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
	},
	[IMX7D] = {
		.variant = IMX7D,
		.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
	},
	[IMX8MQ] = {
		.variant = IMX8MQ,
	},
};

static const struct of_device_id imx6_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie",  .data = &drvdata[IMX6Q],  },
	{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
	{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
	{ .compatible = "fsl,imx7d-pcie",  .data = &drvdata[IMX7D],  },
	{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
	{},
};

static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name	= "imx6q-pcie",
		.of_match_table = imx6_pcie_of_match,
		.suppress_bind_attrs = true,
		.pm = &imx6_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe    = imx6_pcie_probe,
	.shutdown = imx6_pcie_shutdown,
};

static void imx6_pcie_quirk(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;
	struct pcie_port *pp = bus->sysdata;

	/* Bus parent is the PCI bridge, its parent is this platform driver */
	if (!bus->dev.parent || !bus->dev.parent->parent)
		return;

	/* Make sure we only quirk devices associated with this driver */
	if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver)
		return;

	if (pci_is_root_bus(bus)) {
		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
		struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);

		/*
		 * Limit config length to avoid the kernel reading beyond
		 * the register set and causing an abort on i.MX 6Quad
		 */
		if (imx6_pcie->drvdata->dbi_length) {
			dev->cfg_size = imx6_pcie->drvdata->dbi_length;
			dev_info(&dev->dev, "Limiting cfg_size to %d\n",
					dev->cfg_size);
		}
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
			PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk);

static int __init imx6_pcie_init(void)
{
#ifdef CONFIG_ARM
	/*
	 * Since probe() can be deferred we need to make sure that
	 * hook_fault_code is not called after __init memory is freed
	 * by kernel and since imx6q_pcie_abort_handler() is a no-op,
	 * we can install the handler here without risking it
	 * accessing some uninitialized driver state.
	 */
	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
			"external abort on non-linefetch");
#endif

	return platform_driver_register(&imx6_pcie_driver);
}
device_initcall(imx6_pcie_init);