// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 *		http://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include "pcie-designware.h"

#define IMX8MQ_GPR_PCIE_REF_USE_PAD		BIT(9)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN	BIT(10)
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE	BIT(11)
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE	GENMASK(11, 8)
#define IMX8MQ_PCIE2_BASE_ADDR			0x33c00000

#define to_imx6_pcie(x)	dev_get_drvdata((x)->dev)

enum imx6_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX7D,
	IMX8MQ,
};

#define IMX6_PCIE_FLAG_IMX6_PHY			BIT(0)
#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE	BIT(1)
#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND		BIT(2)

struct imx6_pcie_drvdata {
	enum imx6_pcie_variants variant;
	u32 flags;
};

struct imx6_pcie {
	struct dw_pcie		*pci;
	int			reset_gpio;
	bool			gpio_active_high;
	struct clk		*pcie_bus;
	struct clk		*pcie_phy;
	struct clk		*pcie_inbound_axi;
	struct clk		*pcie;
	struct clk		*pcie_aux;
	struct regmap		*iomuxc_gpr;
	u32			controller_id;
	struct reset_control	*pciephy_reset;
	struct reset_control	*apps_reset;
	struct reset_control	*turnoff_reset;
	u32			tx_deemph_gen1;
	u32			tx_deemph_gen2_3p5db;
	u32			tx_deemph_gen2_6db;
	u32			tx_swing_full;
	u32			tx_swing_low;
	int			link_gen;
	struct regulator	*vpcie;
	void __iomem		*phy_base;

	/* power domain for pcie */
	struct device		*pd_pcie;
	/* power domain for pcie phy */
	struct device		*pd_pcie_phy;
	const struct imx6_pcie_drvdata *drvdata;
};

/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200
#define PHY_PLL_LOCK_WAIT_TIMEOUT	(2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)

/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_IMX6_MSI_CAP			0x50
#define PCIE_RC_LCR				0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf

#define PCIE_RC_LCSR				0x80

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700

#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA(x)		FIELD_PREP(GENMASK(15, 0), (x))
#define PCIE_PHY_CTRL_CAP_ADR		BIT(16)
#define PCIE_PHY_CTRL_CAP_DAT		BIT(17)
#define PCIE_PHY_CTRL_WR		BIT(18)
#define PCIE_PHY_CTRL_RD		BIT(19)

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK		BIT(16)

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD			0x10
#define  PCIE_PHY_ATEOVRD_EN			BIT(2)
#define  PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT	0
#define  PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK	0x1

#define PCIE_PHY_MPLL_OVRD_IN_LO		0x11
#define  PCIE_PHY_MPLL_MULTIPLIER_SHIFT		2
#define  PCIE_PHY_MPLL_MULTIPLIER_MASK		0x7f
#define  PCIE_PHY_MPLL_MULTIPLIER_OVRD		BIT(9)

#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)

/* iMX7 PCIe PHY registers */
#define PCIE_PHY_CMN_REG4		0x14
/* These are probably the bits that *aren't* DCC_FB_EN */
#define PCIE_PHY_CMN_REG4_DCC_FB_EN	0x29

#define PCIE_PHY_CMN_REG15	        0x54
#define PCIE_PHY_CMN_REG15_DLY_4	BIT(2)
#define PCIE_PHY_CMN_REG15_PLL_PD	BIT(5)
#define PCIE_PHY_CMN_REG15_OVRD_PLL_PD	BIT(7)

#define PCIE_PHY_CMN_REG24		0x90
#define PCIE_PHY_CMN_REG24_RX_EQ	BIT(6)
#define PCIE_PHY_CMN_REG24_RX_EQ_SEL	BIT(3)

#define PCIE_PHY_CMN_REG26		0x98
#define PCIE_PHY_CMN_REG26_ATT_MODE	0xBC

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN		BIT(5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN		BIT(3)

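/*
 * Note on the indirect PHY access protocol implemented below: an address or
 * data word is written to PCIE_PHY_CTRL, one of the CAP_ADR/CAP_DAT/RD/WR
 * strobes is pulsed, and completion of each step is detected by polling the
 * ACK bit in PCIE_PHY_STAT.
 */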
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	bool val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
			PCIE_PHY_STAT_ACK;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val;
	int ret;

	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	val |= PCIE_PHY_CTRL_CAP_ADR;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	val = PCIE_PHY_CTRL_DATA(addr);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	return pcie_phy_poll_ack(imx6_pcie, false);
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = PCIE_PHY_CTRL_RD;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);

	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	*data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);

	/* deassert Read signal */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);

	return pcie_phy_poll_ack(imx6_pcie, false);
}

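/*
 * Write to the 16-bit PCIe PHY control registers (not memory-mapped):
 * capture the address and data words, then pulse the write strobe, waiting
 * for the ack to toggle after every step (mirroring the read sequence above).
 */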
static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= PCIE_PHY_CTRL_CAP_DAT;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	/* deassert cap data */
	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, false);
	if (ret)
		return ret;

	/* assert wr signal */
	var = PCIE_PHY_CTRL_WR;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx6_pcie, true);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = PCIE_PHY_CTRL_DATA(data);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, false);
	if (ret)
		return ret;

	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);

	return 0;
}

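/*
 * Force a receiver reset on the i.MX6-style PHY by toggling the RX data/PLL
 * override enables through the indirect PHY interface; a no-op on variants
 * without the IMX6_PCIE_FLAG_IMX6_PHY flag.
 */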
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
	u16 tmp;

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
		return;

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);

	usleep_range(2000, 3000);

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		  PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
}

#ifdef CONFIG_ARM
/*  Added for PCI abort handling */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);
	unsigned long instr = *(unsigned long *)pc;
	int reg = (instr >> 12) & 15;

	/*
	 * If the instruction being executed was a read,
	 * make it look like it read all-ones.
	 */
	if ((instr & 0x0c100000) == 0x04100000) {
		unsigned long val;

		if (instr & 0x00400000)
			val = 255;
		else
			val = -1;

		regs->uregs[reg] = val;
		regs->ARM_pc += 4;
		return 0;
	}

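	/*
	 * Halfword and signed-byte loads (LDRH/LDRSB/LDRSH) are likewise
	 * faked to read back all-ones.
	 */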
	if ((instr & 0x0e100090) == 0x00100090) {
		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
		return 0;
	}

	return 1;
}
#endif

static int imx6_pcie_attach_pd(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
	struct device_link *link;

	/* Do nothing when in a single power domain */
	if (dev->pm_domain)
		return 0;

	imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
	if (IS_ERR(imx6_pcie->pd_pcie))
		return PTR_ERR(imx6_pcie->pd_pcie);
	/* Do nothing when power domain missing */
	if (!imx6_pcie->pd_pcie)
		return 0;
	link = device_link_add(dev, imx6_pcie->pd_pcie,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie pd.\n");
		return -EINVAL;
	}

	imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pd_pcie_phy))
		return PTR_ERR(imx6_pcie->pd_pcie_phy);

	link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
		return -EINVAL;
	}

	return 0;
}

static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;

	switch (imx6_pcie->drvdata->variant) {
	case IMX7D:
	case IMX8MQ:
		reset_control_assert(imx6_pcie->pciephy_reset);
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
		/* Force PCIe PHY reset */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST,
				   IMX6Q_GPR1_PCIE_SW_RST);
		break;
	case IMX6Q:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
		break;
	}

	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		int ret = regulator_disable(imx6_pcie->vpcie);

		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}

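/*
 * The i.MX8MQ has two controllers whose clock and PHY controls live in
 * different IOMUXC GPR registers; controller_id selects the right one.
 */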
static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
{
	WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ);
	return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}

static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	unsigned int offset;
	int ret = 0;

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
		ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
		if (ret) {
			dev_err(dev, "unable to enable pcie_axi clock\n");
			break;
		}

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
		break;
	case IMX6QP:		/* FALLTHROUGH */
	case IMX6Q:
		/* power up core phy and enable ref clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
		/*
		 * The async reset input needs the ref clock to sync
		 * internally. When the ref clock only comes up after reset,
		 * the internally synced reset time is too short to meet the
		 * requirement, so add a ~10us delay here.
		 */
		usleep_range(10, 100);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
		break;
	case IMX7D:
		break;
	case IMX8MQ:
		ret = clk_prepare_enable(imx6_pcie->pcie_aux);
		if (ret) {
			dev_err(dev, "unable to enable pcie_aux clock\n");
			break;
		}

		offset = imx6_pcie_grp_offset(imx6_pcie);
		/*
		 * Drive the CLK_REQ override low and enable the override so
		 * that REF_CLK is kept turned on.
		 */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
				   0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
		break;
	}

	return ret;
}

static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
{
	u32 val;
	struct device *dev = imx6_pcie->pci->dev;

	if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
				     IOMUXC_GPR22, val,
				     val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
				     PHY_PLL_LOCK_WAIT_USLEEP_MAX,
				     PHY_PLL_LOCK_WAIT_TIMEOUT))
		dev_err(dev, "PCIe PLL lock timeout\n");
}

static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
		ret = regulator_enable(imx6_pcie->vpcie);
		if (ret) {
			dev_err(dev, "failed to enable vpcie regulator: %d\n",
				ret);
			return;
		}
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
	if (ret) {
		dev_err(dev, "unable to enable pcie_phy clock\n");
		goto err_pcie_phy;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
	if (ret) {
		dev_err(dev, "unable to enable pcie_bus clock\n");
		goto err_pcie_bus;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie clock\n");
		goto err_pcie;
	}

	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie ref clock\n");
		goto err_ref_clk;
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);

	/* Some boards don't have PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					imx6_pcie->gpio_active_high);
		msleep(100);
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					!imx6_pcie->gpio_active_high);
	}

	switch (imx6_pcie->drvdata->variant) {
	case IMX8MQ:
		reset_control_deassert(imx6_pcie->pciephy_reset);
		break;
	case IMX7D:
		reset_control_deassert(imx6_pcie->pciephy_reset);

		/* Workaround for ERR010728, failure of PCI-e PLL VCO to
		 * oscillate, especially when cold.  This turns off "Duty-cycle
		 * Corrector" and other mysterious undocumented things.
		 */
		if (likely(imx6_pcie->phy_base)) {
			/* De-assert DCC_FB_EN */
			writel(PCIE_PHY_CMN_REG4_DCC_FB_EN,
			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG4);
			/* Assert RX_EQS and RX_EQS_SEL */
			writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL
				| PCIE_PHY_CMN_REG24_RX_EQ,
			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG24);
			/* Assert ATT_MODE */
			writel(PCIE_PHY_CMN_REG26_ATT_MODE,
			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG26);
		} else {
			dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
		}

		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST, 0);

		usleep_range(200, 500);
		break;
	case IMX6Q:		/* Nothing to do */
		break;
	}

	return;

err_ref_clk:
	clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
	clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
	clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		ret = regulator_disable(imx6_pcie->vpcie);
		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}

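/*
 * Program the device-type field in IOMUXC GPR12 to PCI_EXP_TYPE_ROOT_PORT
 * (Root Complex mode); the second i.MX8MQ controller has its own field in
 * the same register.
 */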
static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
{
	unsigned int mask, val;

	if (imx6_pcie->drvdata->variant == IMX8MQ &&
	    imx6_pcie->controller_id == 1) {
		mask   = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
		val    = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
				    PCI_EXP_TYPE_ROOT_PORT);
	} else {
		mask = IMX6Q_GPR12_DEVICE_TYPE;
		val  = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
				  PCI_EXP_TYPE_ROOT_PORT);
	}

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
}

static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
	switch (imx6_pcie->drvdata->variant) {
	case IMX8MQ:
		/*
		 * TODO: Currently this code assumes external
		 * oscillator is being used
		 */
		regmap_update_bits(imx6_pcie->iomuxc_gpr,
				   imx6_pcie_grp_offset(imx6_pcie),
				   IMX8MQ_GPR_PCIE_REF_USE_PAD,
				   IMX8MQ_GPR_PCIE_REF_USE_PAD);
		break;
	case IMX7D:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
				   IMX6SX_GPR12_PCIE_RX_EQ_2);
		/* FALLTHROUGH */
	default:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

		/* configure constant input signal to the pcie ctrl and phy */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
				   imx6_pcie->tx_deemph_gen1 << 0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
				   imx6_pcie->tx_deemph_gen2_6db << 12);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_FULL,
				   imx6_pcie->tx_swing_full << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_LOW,
				   imx6_pcie->tx_swing_low << 25);
		break;
	}

	imx6_pcie_configure_type(imx6_pcie);
}

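/*
 * The i.MX6 PHY MPLL defaults assume a 125 MHz reference. For 100 MHz and
 * 200 MHz references the multiplier and reference divider are overridden
 * below so the PLL still runs at its nominal rate; other rates are rejected.
 */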
static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
{
	unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
	int mult, div;
	u16 val;

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
		return 0;

	switch (phy_rate) {
	case 125000000:
		/*
		 * The default settings of the MPLL are for a 125MHz input
		 * clock, so no need to reconfigure anything in that case.
		 */
		return 0;
	case 100000000:
		mult = 25;
		div = 0;
		break;
	case 200000000:
		mult = 25;
		div = 1;
		break;
	default:
		dev_err(imx6_pcie->pci->dev,
			"Unsupported PHY reference clock rate %lu\n", phy_rate);
		return -EINVAL;
	}

	pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
	pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);

	pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
	val |= PCIE_PHY_ATEOVRD_EN;
	pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);

	return 0;
}

static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		/* Test if the speed change finished. */
		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
			return 0;
		usleep_range(100, 1000);
	}

	dev_err(dev, "Speed change timeout\n");
	return -ETIMEDOUT;
}

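/*
 * Kick off link training: on i.MX6Q/6SX/6QP the LTSSM is gated by a GPR12
 * control bit, while i.MX7D/8MQ release the "apps" core reset instead.
 */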
static void imx6_pcie_ltssm_enable(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->drvdata->variant) {
	case IMX6Q:
	case IMX6SX:
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2,
				   IMX6Q_GPR12_PCIE_CTL_2);
		break;
	case IMX7D:
	case IMX8MQ:
		reset_control_deassert(imx6_pcie->apps_reset);
		break;
	}
}

static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link.  In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all.  This happens with PCIe switches.
	 */
	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
	dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

	/* Start LTSSM. */
	imx6_pcie_ltssm_enable(dev);

	ret = dw_pcie_wait_for_link(pci);
	if (ret)
		goto err_reset_phy;

	if (imx6_pcie->link_gen == 2) {
		/* Allow Gen2 mode after the link is up. */
		tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
		dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

		/*
		 * Start Directed Speed Change so the best possible
		 * speed both link partners support can be negotiated.
		 */
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		tmp |= PORT_LOGIC_SPEED_CHANGE;
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);

		if (imx6_pcie->drvdata->flags &
		    IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
			/*
			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
			 * from i.MX6 family when no link speed transition
			 * occurs and we go Gen1 -> yep, Gen1. The difference
			 * is that, in such case, it will not be cleared by HW
			 * which will cause the following code to report false
			 * failure.
			 */

			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
			if (ret) {
				dev_err(dev, "Failed to bring link up!\n");
				goto err_reset_phy;
			}
		}

		/* Make sure link training is finished as well! */
		ret = dw_pcie_wait_for_link(pci);
		if (ret) {
			dev_err(dev, "Failed to bring link up!\n");
			goto err_reset_phy;
		}
	} else {
		dev_info(dev, "Link: Gen2 disabled\n");
	}

	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
	dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
	return 0;

err_reset_phy:
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
	imx6_pcie_reset_phy(imx6_pcie);
	return ret;
}

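/*
 * DesignWare host init hook: bring the core out of reset with the PHY
 * configured, program the MPLL and Root Complex settings, then attempt to
 * establish the link.
 */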
static int imx6_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	imx6_setup_phy_mpll(imx6_pcie);
	dw_pcie_setup_rc(pp);
	imx6_pcie_establish_link(imx6_pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	return 0;
}

static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
	.host_init = imx6_pcie_host_init,
};

static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
			      struct platform_device *pdev)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq <= 0) {
			dev_err(dev, "failed to get MSI irq\n");
			return -ENODEV;
		}
	}

	pp->ops = &imx6_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	/* No special ops needed, but pcie-designware still expects this struct */
};

#ifdef CONFIG_PM_SLEEP
static void imx6_pcie_ltssm_disable(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0);
		break;
	case IMX7D:
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	default:
		dev_err(dev, "ltssm_disable not supported\n");
	}
}

static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;

	/* Some variants have a turnoff reset in DT */
	if (imx6_pcie->turnoff_reset) {
		reset_control_assert(imx6_pcie->turnoff_reset);
		reset_control_deassert(imx6_pcie->turnoff_reset);
		goto pm_turnoff_sleep;
	}

	/* Others poke directly at IOMUXC registers */
	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				IMX6SX_GPR12_PCIE_PM_TURN_OFF,
				IMX6SX_GPR12_PCIE_PM_TURN_OFF);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
		break;
	default:
		dev_err(dev, "PME_Turn_Off not implemented\n");
		return;
	}

	/*
	 * Components with an upstream port must respond to
	 * PME_Turn_Off with PME_TO_Ack but we can't check.
	 *
	 * The standard recommends a 1-10ms timeout after which to
	 * proceed anyway as if acks were received.
	 */
pm_turnoff_sleep:
	usleep_range(1000, 10000);
}

static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
{
	clk_disable_unprepare(imx6_pcie->pcie);
	clk_disable_unprepare(imx6_pcie->pcie_phy);
	clk_disable_unprepare(imx6_pcie->pcie_bus);

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
		clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
		break;
	case IMX7D:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
		break;
	case IMX8MQ:
		clk_disable_unprepare(imx6_pcie->pcie_aux);
		break;
	default:
		break;
	}
}

static int imx6_pcie_suspend_noirq(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
		return 0;

	imx6_pcie_pm_turnoff(imx6_pcie);
	imx6_pcie_clk_disable(imx6_pcie);
	imx6_pcie_ltssm_disable(dev);

	return 0;
}

static int imx6_pcie_resume_noirq(struct device *dev)
{
	int ret;
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
	struct pcie_port *pp = &imx6_pcie->pci->pp;

	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
		return 0;

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	dw_pcie_setup_rc(pp);

	ret = imx6_pcie_establish_link(imx6_pcie);
	if (ret < 0)
		dev_info(dev, "pcie link is down after resume.\n");

	return 0;
}
#endif

static const struct dev_pm_ops imx6_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
				      imx6_pcie_resume_noirq)
};

static int imx6_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct imx6_pcie *imx6_pcie;
	struct device_node *np;
	struct resource *dbi_base;
	struct device_node *node = dev->of_node;
	int ret;
	u16 val;

	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
	if (!imx6_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	imx6_pcie->pci = pci;
	imx6_pcie->drvdata = of_device_get_match_data(dev);

	/* Find the PHY if one is defined, only imx7d uses it */
	np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
	if (np) {
		struct resource res;

		ret = of_address_to_resource(np, 0, &res);
		if (ret) {
			dev_err(dev, "Unable to map PCIe PHY\n");
			return ret;
		}
		imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
		if (IS_ERR(imx6_pcie->phy_base)) {
			dev_err(dev, "Unable to map PCIe PHY\n");
			return PTR_ERR(imx6_pcie->phy_base);
		}
	}

	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/* Fetch GPIOs */
	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
	imx6_pcie->gpio_active_high = of_property_read_bool(node,
						"reset-gpio-active-high");
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
				imx6_pcie->gpio_active_high ?
					GPIOF_OUT_INIT_HIGH :
					GPIOF_OUT_INIT_LOW,
				"PCIe reset");
		if (ret) {
			dev_err(dev, "unable to get reset gpio\n");
			return ret;
		}
	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
		return imx6_pcie->reset_gpio;
	}

	/* Fetch clocks */
	imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pcie_phy)) {
		dev_err(dev, "pcie_phy clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_phy);
	}

	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(imx6_pcie->pcie_bus)) {
		dev_err(dev, "pcie_bus clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_bus);
	}

	imx6_pcie->pcie = devm_clk_get(dev, "pcie");
	if (IS_ERR(imx6_pcie->pcie)) {
		dev_err(dev, "pcie clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie);
	}

	switch (imx6_pcie->drvdata->variant) {
	case IMX6SX:
		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
							   "pcie_inbound_axi");
		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
			dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
		}
		break;
	case IMX8MQ:
		imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
		if (IS_ERR(imx6_pcie->pcie_aux)) {
			dev_err(dev, "pcie_aux clock source missing or invalid\n");
			return PTR_ERR(imx6_pcie->pcie_aux);
		}
		/* fall through */
	case IMX7D:
		if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
			imx6_pcie->controller_id = 1;

		imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
									    "pciephy");
		if (IS_ERR(imx6_pcie->pciephy_reset)) {
			dev_err(dev, "Failed to get PCIEPHY reset control\n");
			return PTR_ERR(imx6_pcie->pciephy_reset);
		}

		imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
									 "apps");
		if (IS_ERR(imx6_pcie->apps_reset)) {
			dev_err(dev, "Failed to get PCIE APPS reset control\n");
			return PTR_ERR(imx6_pcie->apps_reset);
		}
		break;
	default:
		break;
	}

	/* Grab turnoff reset */
	imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
	if (IS_ERR(imx6_pcie->turnoff_reset)) {
		dev_err(dev, "Failed to get TURNOFF reset control\n");
		return PTR_ERR(imx6_pcie->turnoff_reset);
	}

	/* Grab GPR config register range */
	imx6_pcie->iomuxc_gpr =
		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
		dev_err(dev, "unable to find iomuxc registers\n");
		return PTR_ERR(imx6_pcie->iomuxc_gpr);
	}

	/* Grab PCIe PHY Tx Settings */
	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
				 &imx6_pcie->tx_deemph_gen1))
		imx6_pcie->tx_deemph_gen1 = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
				 &imx6_pcie->tx_deemph_gen2_3p5db))
		imx6_pcie->tx_deemph_gen2_3p5db = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
				 &imx6_pcie->tx_deemph_gen2_6db))
		imx6_pcie->tx_deemph_gen2_6db = 20;

	if (of_property_read_u32(node, "fsl,tx-swing-full",
				 &imx6_pcie->tx_swing_full))
		imx6_pcie->tx_swing_full = 127;

	if (of_property_read_u32(node, "fsl,tx-swing-low",
				 &imx6_pcie->tx_swing_low))
		imx6_pcie->tx_swing_low = 127;

	/* Limit link speed */
	ret = of_property_read_u32(node, "fsl,max-link-speed",
				   &imx6_pcie->link_gen);
	if (ret)
		imx6_pcie->link_gen = 1;

	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
	if (IS_ERR(imx6_pcie->vpcie)) {
		if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		imx6_pcie->vpcie = NULL;
	}

	platform_set_drvdata(pdev, imx6_pcie);

	ret = imx6_pcie_attach_pd(dev);
	if (ret)
		return ret;

	ret = imx6_add_pcie_port(imx6_pcie, pdev);
	if (ret < 0)
		return ret;

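	/*
	 * Enable MSI in the Root Complex's own MSI capability register;
	 * without this, MSIs from devices below the Root Port reportedly
	 * are not delivered.
	 */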
	if (pci_msi_enabled()) {
		val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
					PCI_MSI_FLAGS);
		val |= PCI_MSI_FLAGS_ENABLE;
		dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
				   val);
	}

	return 0;
}

static void imx6_pcie_shutdown(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);

	/* bring down link, so bootloader gets clean state in case of reboot */
	imx6_pcie_assert_core_reset(imx6_pcie);
}

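/*
 * Per-variant data: the flags select i.MX6-style PHY handling, whether the
 * directed-speed-change poll is used, and suspend/resume support.
 */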
static const struct imx6_pcie_drvdata drvdata[] = {
	[IMX6Q] = {
		.variant = IMX6Q,
		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
	},
	[IMX6SX] = {
		.variant = IMX6SX,
		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
			 IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
	},
	[IMX6QP] = {
		.variant = IMX6QP,
		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
	},
	[IMX7D] = {
		.variant = IMX7D,
		.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
	},
	[IMX8MQ] = {
		.variant = IMX8MQ,
	},
};

static const struct of_device_id imx6_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie",  .data = &drvdata[IMX6Q],  },
	{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
	{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
	{ .compatible = "fsl,imx7d-pcie",  .data = &drvdata[IMX7D],  },
	{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
	{},
};

static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name	= "imx6q-pcie",
		.of_match_table = imx6_pcie_of_match,
		.suppress_bind_attrs = true,
		.pm = &imx6_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe    = imx6_pcie_probe,
	.shutdown = imx6_pcie_shutdown,
};

static int __init imx6_pcie_init(void)
{
#ifdef CONFIG_ARM
	/*
	 * Since probe() can be deferred we need to make sure that
	 * hook_fault_code is not called after __init memory is freed
	 * by kernel and since imx6q_pcie_abort_handler() is a no-op,
	 * we can install the handler here without risking it
	 * accessing some uninitialized driver state.
	 */
	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
			"external abort on non-linefetch");
#endif

	return platform_driver_register(&imx6_pcie_driver);
}
device_initcall(imx6_pcie_init);