pci-imx6.c 28.7 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 *		http://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
17
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
18 19
#include <linux/module.h>
#include <linux/of_gpio.h>
20
#include <linux/of_device.h>
21 22 23
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
24
#include <linux/regulator/consumer.h>
25 26 27
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
L
Lucas Stach 已提交
28
#include <linux/interrupt.h>
29
#include <linux/reset.h>
L
Leonard Crestez 已提交
30 31
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
32 33 34

#include "pcie-designware.h"

35
/* Recover the imx6_pcie private data from a dw_pcie handle (stored as drvdata) */
#define to_imx6_pcie(x)	dev_get_drvdata((x)->dev)
36

37 38
/* SoC variants supported by this driver; selected via of_device_get_match_data() */
enum imx6_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX7D,
};

44
/* Per-controller driver state, allocated in probe */
struct imx6_pcie {
	struct dw_pcie		*pci;		/* DesignWare core handle */
	int			reset_gpio;	/* optional PERST# GPIO (may be invalid) */
	bool			gpio_active_high; /* from "reset-gpio-active-high" DT prop */
	struct clk		*pcie_bus;
	struct clk		*pcie_phy;
	struct clk		*pcie_inbound_axi; /* i.MX6SX only */
	struct clk		*pcie;
	struct regmap		*iomuxc_gpr;	/* IOMUXC GPR syscon regmap */
	struct reset_control	*pciephy_reset;	/* i.MX7D only */
	struct reset_control	*apps_reset;	/* i.MX7D only; gates LTSSM */
	struct reset_control	*turnoff_reset;	/* optional PME_Turn_Off reset */
	enum imx6_pcie_variants variant;
	/* PHY Tx tuning values from DT (see probe for defaults) */
	u32			tx_deemph_gen1;
	u32			tx_deemph_gen2_3p5db;
	u32			tx_deemph_gen2_6db;
	u32			tx_swing_full;
	u32			tx_swing_low;
	int			link_gen;	/* max link speed, 1 or 2 */
	struct regulator	*vpcie;		/* optional slot supply */

	/* power domain for pcie */
	struct device		*pd_pcie;
	/* power domain for pcie phy */
	struct device		*pd_pcie_phy;
};

71 72 73 74 75
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
#define PHY_PLL_LOCK_WAIT_MAX_RETRIES	2000
#define PHY_PLL_LOCK_WAIT_USLEEP_MIN	50
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200

/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_IMX6_MSI_CAP			0x50
#define PCIE_RC_LCR				0x7c	/* Link Capabilities */
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf

#define PCIE_RC_LCSR				0x80	/* Link Control and Status */

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
#define PCIE_PL_PFLR_LINK_STATE_MASK		(0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK			(1 << 15)
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)

/* Indirect PHY control interface: write cmd/addr/data ... */
#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
#define PCIE_PHY_CTRL_WR_LOC 18
#define PCIE_PHY_CTRL_RD_LOC 19

/* ... and poll the ack bit / read data back here */
#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC 16

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD			0x10
#define  PCIE_PHY_ATEOVRD_EN			(0x1 << 2)
#define  PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT	0
#define  PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK	0x1

#define PCIE_PHY_MPLL_OVRD_IN_LO		0x11
#define  PCIE_PHY_MPLL_MULTIPLIER_SHIFT		2
#define  PCIE_PHY_MPLL_MULTIPLIER_MASK		0x7f
#define  PCIE_PHY_MPLL_MULTIPLIER_OVRD		(0x1 << 9)

#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)

124
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
125
{
126
	struct dw_pcie *pci = imx6_pcie->pci;
127 128 129 130 131
	u32 val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
132
		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
133 134 135 136 137 138 139 140 141 142 143 144
		val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

145
/*
 * Latch a PHY register address into the indirect PHY control interface:
 * drive the address on the data bits, pulse the "capture address" bit
 * and wait for the PHY ack, then drop the pulse and wait for the ack
 * to clear.  The write order below is part of the handshake protocol.
 *
 * Returns 0 on success, -ETIMEDOUT if the PHY never acks.
 */
static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val;
	int ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	/* assert "capture address" while the address is still driven */
	val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert "capture address", keeping the address bits */
	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	return pcie_phy_poll_ack(imx6_pcie, 0);
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
/*
 * Latches @addr via pcie_phy_wait_ack(), pulses the Read strobe, and
 * returns the 16 data bits from PCIE_PHY_STAT in *@data.  *@data is
 * only written on success.
 *
 * Returns 0 on success or -ETIMEDOUT if any handshake step times out.
 */
static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val, phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
	*data = val & 0xffff;

	/* deassert Read signal */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);

	return pcie_phy_poll_ack(imx6_pcie, 0);
}

195
/*
 * Write a 16-bit value to a PCIe PHY control register (not memory-mapped).
 *
 * Follows the indirect-access handshake: latch the address, present the
 * data and pulse "capture data", then pulse the Write strobe, each step
 * bracketed by ack assert/deassert polling.  The exact sequence of
 * register writes below must not be reordered.
 *
 * Returns 0 on success or -ETIMEDOUT from the first failing handshake.
 */
static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	/* release the control interface */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);

	return 0;
}

250
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
251 252 253
{
	u32 tmp;

254
	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
255 256
	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
257
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
258 259 260

	usleep_range(2000, 3000);

261
	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
262 263
	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		  PHY_RX_OVRD_IN_LO_RX_PLL_EN);
264
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
265 266
}

267 268 269 270
/*  Added for PCI abort handling */
/*
 * ARM external-abort fixup, installed from imx6_pcie_init() via
 * hook_fault_code().  Config/IO accesses while the link is down raise
 * aborts; for load instructions we fake an all-ones result (as reads
 * from an absent PCI device would return) and step over the
 * instruction instead of killing the process.
 *
 * Returns 0 when the abort was fixed up, 1 to let the kernel deliver
 * SIGBUS.
 *
 * NOTE(review): the decode assumes 32-bit ARM encodings and a 4-byte
 * instruction advance — Thumb mode is not handled here; confirm the
 * fault hook is only reached in ARM state.
 */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);
	unsigned long instr = *(unsigned long *)pc;
	int reg = (instr >> 12) & 15;	/* Rd field of the faulting insn */

	/*
	 * If the instruction being executed was a read,
	 * make it look like it read all-ones.
	 */
	if ((instr & 0x0c100000) == 0x04100000) {
		/* single data transfer load (LDR/LDRB class) */
		unsigned long val;

		if (instr & 0x00400000)	/* byte access -> 0xff */
			val = 255;
		else
			val = -1;

		regs->uregs[reg] = val;
		regs->ARM_pc += 4;	/* skip the faulting instruction */
		return 0;
	}

	if ((instr & 0x0e100090) == 0x00100090) {
		/* "extra" load encodings (halfword/signed loads) */
		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
		return 0;
	}

	/* not a load we can fix up — fall through to SIGBUS */
	return 1;
}

L
Leonard Crestez 已提交
301 302 303 304 305 306 307 308 309 310 311 312
/*
 * Attach the "pcie" and "pcie_phy" power domains and link them to the
 * device so runtime PM keeps both powered while the controller is
 * active.  No-op on systems with zero or one power domain.
 *
 * Returns 0 on success or a negative errno.
 */
static int imx6_pcie_attach_pd(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
	struct device_link *link;

	/* Do nothing when in a single power domain */
	if (dev->pm_domain)
		return 0;

	imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
	if (IS_ERR(imx6_pcie->pd_pcie))
		return PTR_ERR(imx6_pcie->pd_pcie);
	/* Do nothing when power domain missing */
	if (!imx6_pcie->pd_pcie)
		return 0;
	link = device_link_add(dev, imx6_pcie->pd_pcie,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie pd.\n");
		return -EINVAL;
	}

	imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pd_pcie_phy))
		return PTR_ERR(imx6_pcie->pd_pcie_phy);

	/*
	 * NOTE(review): unlike pd_pcie above, a NULL pd_pcie_phy is not
	 * filtered before device_link_add() — verify that is intentional
	 * (both domains are expected present when the first one is).
	 */
	link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
		return -EINVAL;
	}

	return 0;
}

341
/*
 * Put the PCIe core (and PHY where applicable) into reset and cut the
 * optional slot supply.  Counterpart of imx6_pcie_deassert_core_reset();
 * the reset mechanism differs per SoC variant.
 */
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;

	switch (imx6_pcie->variant) {
	case IMX7D:
		/* i.MX7 uses dedicated reset controls */
		reset_control_assert(imx6_pcie->pciephy_reset);
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
		/* Force PCIe PHY reset */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST,
				   IMX6Q_GPR1_PCIE_SW_RST);
		break;
	case IMX6Q:
		/* power down the test PD and gate the reference clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
		break;
	}

	/* drop the optional slot supply if it is currently on */
	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		int ret = regulator_disable(imx6_pcie->vpcie);

		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}

381 382
/*
 * Enable the variant-specific PCIe reference clock path.
 *
 * Returns 0 on success or a negative errno (only the i.MX6SX inbound
 * AXI clock enable can fail here; the other paths poke GPR bits).
 */
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret = 0;

	switch (imx6_pcie->variant) {
	case IMX6SX:
		ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
		if (ret) {
			dev_err(dev, "unable to enable pcie_axi clock\n");
			break;
		}

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
		break;
	case IMX6QP:		/* FALLTHROUGH */
	case IMX6Q:
		/* power up core phy and enable ref clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
		/*
		 * the async reset input need ref clock to sync internally,
		 * when the ref clock comes after reset, internal synced
		 * reset time is too short, cannot meet the requirement.
		 * add one ~10us delay here.
		 */
		udelay(10);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
		break;
	case IMX7D:
		/* nothing to do: ref clock handled by the reset controls */
		break;
	}

	return ret;
}

420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438
/*
 * Busy-wait (with sleeps) for the i.MX7 PHY PLL lock bit in GPR22.
 * Logs an error on timeout but does not fail — callers proceed anyway.
 */
static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;
	unsigned int attempt = 0;
	u32 gpr22;

	while (attempt++ < PHY_PLL_LOCK_WAIT_MAX_RETRIES) {
		regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &gpr22);
		if (gpr22 & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED)
			return;

		usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN,
			     PHY_PLL_LOCK_WAIT_USLEEP_MAX);
	}

	dev_err(dev, "PCIe PLL lock timeout\n");
}

439
/*
 * Bring the core out of reset: enable the slot supply and all clocks,
 * toggle the optional PERST# GPIO, then release the variant-specific
 * reset.  On clock failure the goto ladder unwinds in reverse order of
 * acquisition.  Errors are logged, not returned (void, matching
 * imx6_pcie_assert_core_reset()).
 */
static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	/* turn on the optional slot supply first */
	if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
		ret = regulator_enable(imx6_pcie->vpcie);
		if (ret) {
			dev_err(dev, "failed to enable vpcie regulator: %d\n",
				ret);
			return;
		}
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
	if (ret) {
		dev_err(dev, "unable to enable pcie_phy clock\n");
		goto err_pcie_phy;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
	if (ret) {
		dev_err(dev, "unable to enable pcie_bus clock\n");
		goto err_pcie_bus;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie clock\n");
		goto err_pcie;
	}

	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie ref clock\n");
		goto err_ref_clk;
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);

	/* Some boards don't have PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		/* pulse PERST#: assert, hold 100ms, release */
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					imx6_pcie->gpio_active_high);
		msleep(100);
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					!imx6_pcie->gpio_active_high);
	}

	switch (imx6_pcie->variant) {
	case IMX7D:
		reset_control_deassert(imx6_pcie->pciephy_reset);
		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST, 0);

		usleep_range(200, 500);
		break;
	case IMX6Q:		/* Nothing to do */
		break;
	}

	return;

	/* unwind clocks in reverse order of acquisition */
err_ref_clk:
	clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
	clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
	clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		ret = regulator_disable(imx6_pcie->vpcie);
		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}

526
/*
 * Program the IOMUXC GPR PHY settings for the current variant and mark
 * the port as a Root Complex.  The i.MX6SX case intentionally falls
 * through into the common i.MX6 path after its RX-EQ setup; only
 * i.MX7D skips the GPR8/GPR12 tuning entirely.
 */
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
	switch (imx6_pcie->variant) {
	case IMX7D:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
				   IMX6SX_GPR12_PCIE_RX_EQ_2);
		/* FALLTHROUGH */
	default:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

		/* configure constant input signal to the pcie ctrl and phy */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

		/* Tx tuning values come from DT (see probe for defaults) */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
				   imx6_pcie->tx_deemph_gen1 << 0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
				   imx6_pcie->tx_deemph_gen2_6db << 12);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_FULL,
				   imx6_pcie->tx_swing_full << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_LOW,
				   imx6_pcie->tx_swing_low << 25);
		break;
	}

	/* all variants: configure the port as a Root Complex */
	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
}

568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611
/*
 * Reconfigure the PHY MPLL multiplier and ATE reference clock divider
 * for non-default (non-125MHz) PHY reference clock rates.
 *
 * Fix: the pcie_phy_read()/pcie_phy_write() return values were
 * ignored, so a timed-out indirect read left @val uninitialized and
 * the following read-modify-write programmed garbage into the PHY.
 * Errors are now propagated to the caller.
 *
 * Returns 0 on success, -EINVAL for an unsupported rate, or the
 * negative errno from a failed PHY access.
 */
static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
{
	unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
	int mult, div;
	u32 val;
	int ret;

	switch (phy_rate) {
	case 125000000:
		/*
		 * The default settings of the MPLL are for a 125MHz input
		 * clock, so no need to reconfigure anything in that case.
		 */
		return 0;
	case 100000000:
		mult = 25;
		div = 0;
		break;
	case 200000000:
		mult = 25;
		div = 1;
		break;
	default:
		dev_err(imx6_pcie->pci->dev,
			"Unsupported PHY reference clock rate %lu\n", phy_rate);
		return -EINVAL;
	}

	/* override the MPLL multiplier for the selected refclk rate */
	ret = pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
	if (ret)
		return ret;
	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
	ret = pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
	if (ret)
		return ret;

	/* override the reference clock divider to match */
	ret = pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
	if (ret)
		return ret;
	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
	val |= PCIE_PHY_ATEOVRD_EN;
	return pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
}

612
static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
613
{
614 615
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
616

617
	/* check if the link is up or not */
618
	if (!dw_pcie_wait_for_link(pci))
619
		return 0;
620

621
	dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
622 623
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
624
	return -ETIMEDOUT;
625 626
}

627
static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
628
{
629 630
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
631
	u32 tmp;
632 633 634
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
635
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
636 637 638 639 640 641
		/* Test if the speed change finished. */
		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
			return 0;
		usleep_range(100, 1000);
	}

642
	dev_err(dev, "Speed change timeout\n");
643
	return -EINVAL;
644 645
}

646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663
/* Kick off link training by enabling the LTSSM for the given variant */
static void imx6_pcie_ltssm_enable(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->variant) {
	case IMX7D:
		/* on i.MX7 the LTSSM is gated by the "apps" reset */
		reset_control_deassert(imx6_pcie->apps_reset);
		break;
	case IMX6Q:
	case IMX6SX:
	case IMX6QP:
		/* i.MX6 family: set the LTSSM enable bit in GPR12 */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2,
				   IMX6Q_GPR12_PCIE_CTL_2);
		break;
	}
}

664
/*
 * Bring the link up: train at Gen1 first, then (when link_gen == 2)
 * raise the speed limit and request a directed speed change.  On any
 * failure the PHY is reset so a retry starts from a clean state.
 *
 * Returns 0 with the link up, or a negative errno.
 */
static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link.  In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all.  This happens with PCIe switches.
	 */
	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
	dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

	/* Start LTSSM. */
	imx6_pcie_ltssm_enable(dev);

	ret = imx6_pcie_wait_for_link(imx6_pcie);
	if (ret)
		goto err_reset_phy;

	if (imx6_pcie->link_gen == 2) {
		/* Allow Gen2 mode after the link is up. */
		tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
		dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

		/*
		 * Start Directed Speed Change so the best possible
		 * speed both link partners support can be negotiated.
		 */
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		tmp |= PORT_LOGIC_SPEED_CHANGE;
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);

		if (imx6_pcie->variant != IMX7D) {
			/*
			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
			 * from i.MX6 family when no link speed transition
			 * occurs and we go Gen1 -> yep, Gen1. The difference
			 * is that, in such case, it will not be cleared by HW
			 * which will cause the following code to report false
			 * failure.
			 */

			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
			if (ret) {
				dev_err(dev, "Failed to bring link up!\n");
				goto err_reset_phy;
			}
		}

		/* Make sure link training is finished as well! */
		ret = imx6_pcie_wait_for_link(imx6_pcie);
		if (ret) {
			dev_err(dev, "Failed to bring link up!\n");
			goto err_reset_phy;
		}
	} else {
		dev_info(dev, "Link: Gen2 disabled\n");
	}

	/* report the negotiated speed from the Link Control and Status reg */
	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
	dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
	return 0;

err_reset_phy:
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
	imx6_pcie_reset_phy(imx6_pcie);
	return ret;
}

742
/*
 * DWC host_init callback: full reset/PHY/link bring-up sequence.
 * The call order is significant — the core must be in reset while the
 * PHY GPRs are programmed, and the MPLL setup needs the PHY clocked.
 */
static int imx6_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	imx6_setup_phy_mpll(imx6_pcie);
	dw_pcie_setup_rc(pp);
	imx6_pcie_establish_link(imx6_pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	return 0;
}

760
/* Callbacks handed to the DesignWare host core */
static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
	.host_init = imx6_pcie_host_init,
};

764 765
static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
			      struct platform_device *pdev)
766
{
767 768 769
	struct dw_pcie *pci = imx6_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
770 771
	int ret;

L
Lucas Stach 已提交
772 773 774
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq <= 0) {
775
			dev_err(dev, "failed to get MSI irq\n");
L
Lucas Stach 已提交
776 777 778 779
			return -ENODEV;
		}
	}

780 781 782 783
	pp->ops = &imx6_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
784
		dev_err(dev, "failed to initialize host\n");
785 786 787 788 789 790
		return ret;
	}

	return 0;
}

791
static const struct dw_pcie_ops dw_pcie_ops = {
	/* No special ops needed, but pcie-designware still expects this struct */
};

795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813
#ifdef CONFIG_PM_SLEEP
/*
 * Stop link training for suspend.  Only the variants that support
 * suspend (see imx6_pcie_supports_suspend()) are handled; plain i.MX6Q
 * intentionally falls into the error branch.
 */
static void imx6_pcie_ltssm_disable(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	switch (imx6_pcie->variant) {
	case IMX6SX:
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0);
		break;
	case IMX7D:
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	default:
		dev_err(dev, "ltssm_disable not supported\n");
	}
}

814 815
/*
 * Broadcast the PME_Turn_Off message before suspend, either via the
 * dedicated "turnoff" reset line from DT or, on i.MX6SX, by pulsing
 * the GPR12 turn-off bit directly.
 */
static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;

	/* Some variants have a turnoff reset in DT */
	if (imx6_pcie->turnoff_reset) {
		reset_control_assert(imx6_pcie->turnoff_reset);
		reset_control_deassert(imx6_pcie->turnoff_reset);
		goto pm_turnoff_sleep;
	}

	/* Others poke directly at IOMUXC registers */
	switch (imx6_pcie->variant) {
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				IMX6SX_GPR12_PCIE_PM_TURN_OFF,
				IMX6SX_GPR12_PCIE_PM_TURN_OFF);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
		break;
	default:
		dev_err(dev, "PME_Turn_Off not implemented\n");
		return;
	}

	/*
	 * Components with an upstream port must respond to
	 * PME_Turn_Off with PME_TO_Ack but we can't check.
	 *
	 * The standard recommends a 1-10ms timeout after which to
	 * proceed anyway as if acks were received.
	 */
pm_turnoff_sleep:
	usleep_range(1000, 10000);
}

850 851 852 853 854 855
static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
{
	clk_disable_unprepare(imx6_pcie->pcie);
	clk_disable_unprepare(imx6_pcie->pcie_phy);
	clk_disable_unprepare(imx6_pcie->pcie_bus);

856 857 858 859 860
	switch (imx6_pcie->variant) {
	case IMX6SX:
		clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
		break;
	case IMX7D:
861 862 863
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
864 865 866
		break;
	default:
		break;
867 868 869
	}
}

870 871 872 873 874 875
static inline bool imx6_pcie_supports_suspend(struct imx6_pcie *imx6_pcie)
{
	return (imx6_pcie->variant == IMX7D ||
		imx6_pcie->variant == IMX6SX);
}

876 877 878 879
/*
 * noirq suspend: send PME_Turn_Off, gate the clocks and halt link
 * training.  A no-op (returning 0) on variants without suspend support.
 */
static int imx6_pcie_suspend_noirq(struct device *dev)
{
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);

	if (!imx6_pcie_supports_suspend(imx6_pcie))
		return 0;

	imx6_pcie_pm_turnoff(imx6_pcie);
	imx6_pcie_clk_disable(imx6_pcie);
	imx6_pcie_ltssm_disable(dev);

	return 0;
}

/*
 * noirq resume: redo the full reset/PHY/RC bring-up, then retry the
 * link.  A failed link-up is only logged — resume still succeeds so
 * the system comes back even with an absent endpoint.
 */
static int imx6_pcie_resume_noirq(struct device *dev)
{
	int ret;
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
	struct pcie_port *pp = &imx6_pcie->pci->pp;

	if (!imx6_pcie_supports_suspend(imx6_pcie))
		return 0;

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	dw_pcie_setup_rc(pp);

	ret = imx6_pcie_establish_link(imx6_pcie);
	if (ret < 0)
		dev_info(dev, "pcie link is down after resume.\n");

	return 0;
}
#endif

/* noirq-phase sleep hooks (compiled out without CONFIG_PM_SLEEP) */
static const struct dev_pm_ops imx6_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
				      imx6_pcie_resume_noirq)
};

917
/*
 * Probe: gather all DT resources (dbi registers, reset GPIO, clocks,
 * variant-specific resets, GPR syscon, PHY tuning values, optional
 * regulator), attach power domains, then register the host bridge.
 * All resources are devm-managed, so the early returns need no
 * explicit cleanup.
 */
static int imx6_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct imx6_pcie *imx6_pcie;
	struct resource *dbi_base;
	struct device_node *node = dev->of_node;
	int ret;
	u16 val;

	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
	if (!imx6_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	imx6_pcie->pci = pci;
	/* variant is encoded in the of_match_table .data pointer */
	imx6_pcie->variant =
		(enum imx6_pcie_variants)of_device_get_match_data(dev);

	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/* Fetch GPIOs */
	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
	imx6_pcie->gpio_active_high = of_property_read_bool(node,
						"reset-gpio-active-high");
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		/* claim PERST# in its asserted state */
		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
				imx6_pcie->gpio_active_high ?
					GPIOF_OUT_INIT_HIGH :
					GPIOF_OUT_INIT_LOW,
				"PCIe reset");
		if (ret) {
			dev_err(dev, "unable to get reset gpio\n");
			return ret;
		}
	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
		return imx6_pcie->reset_gpio;
	}

	/* Fetch clocks */
	imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pcie_phy)) {
		dev_err(dev, "pcie_phy clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_phy);
	}

	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(imx6_pcie->pcie_bus)) {
		dev_err(dev, "pcie_bus clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_bus);
	}

	imx6_pcie->pcie = devm_clk_get(dev, "pcie");
	if (IS_ERR(imx6_pcie->pcie)) {
		dev_err(dev, "pcie clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie);
	}

	/* variant-specific clocks and reset controls */
	switch (imx6_pcie->variant) {
	case IMX6SX:
		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
							   "pcie_inbound_axi");
		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
			dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
		}
		break;
	case IMX7D:
		imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
									    "pciephy");
		if (IS_ERR(imx6_pcie->pciephy_reset)) {
			dev_err(dev, "Failed to get PCIEPHY reset control\n");
			return PTR_ERR(imx6_pcie->pciephy_reset);
		}

		imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
									 "apps");
		if (IS_ERR(imx6_pcie->apps_reset)) {
			dev_err(dev, "Failed to get PCIE APPS reset control\n");
			return PTR_ERR(imx6_pcie->apps_reset);
		}
		break;
	default:
		break;
	}

	/* Grab turnoff reset */
	imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
	if (IS_ERR(imx6_pcie->turnoff_reset)) {
		dev_err(dev, "Failed to get TURNOFF reset control\n");
		return PTR_ERR(imx6_pcie->turnoff_reset);
	}

	/* Grab GPR config register range */
	imx6_pcie->iomuxc_gpr =
		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
		dev_err(dev, "unable to find iomuxc registers\n");
		return PTR_ERR(imx6_pcie->iomuxc_gpr);
	}

	/* Grab PCIe PHY Tx Settings (defaults below match absent DT props) */
	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
				 &imx6_pcie->tx_deemph_gen1))
		imx6_pcie->tx_deemph_gen1 = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
				 &imx6_pcie->tx_deemph_gen2_3p5db))
		imx6_pcie->tx_deemph_gen2_3p5db = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
				 &imx6_pcie->tx_deemph_gen2_6db))
		imx6_pcie->tx_deemph_gen2_6db = 20;

	if (of_property_read_u32(node, "fsl,tx-swing-full",
				 &imx6_pcie->tx_swing_full))
		imx6_pcie->tx_swing_full = 127;

	if (of_property_read_u32(node, "fsl,tx-swing-low",
				 &imx6_pcie->tx_swing_low))
		imx6_pcie->tx_swing_low = 127;

	/* Limit link speed */
	ret = of_property_read_u32(node, "fsl,max-link-speed",
				   &imx6_pcie->link_gen);
	if (ret)
		imx6_pcie->link_gen = 1;

	/* optional slot supply; absence is not an error */
	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
	if (IS_ERR(imx6_pcie->vpcie)) {
		if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		imx6_pcie->vpcie = NULL;
	}

	/* drvdata must be set before attach_pd/host_init use it */
	platform_set_drvdata(pdev, imx6_pcie);

	ret = imx6_pcie_attach_pd(dev);
	if (ret)
		return ret;

	ret = imx6_add_pcie_port(imx6_pcie, pdev);
	if (ret < 0)
		return ret;

	/* enable MSI in the RC's capability structure */
	if (pci_msi_enabled()) {
		val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
					PCI_MSI_FLAGS);
		val |= PCI_MSI_FLAGS_ENABLE;
		dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
				   val);
	}

	return 0;
}

1082 1083 1084 1085 1086
static void imx6_pcie_shutdown(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);

	/* bring down link, so bootloader gets clean state in case of reboot */
	imx6_pcie_assert_core_reset(imx6_pcie);
}

1090
/* DT match table; .data carries the imx6_pcie_variants value */
static const struct of_device_id imx6_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie",  .data = (void *)IMX6Q,  },
	{ .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
	{ .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
	{ .compatible = "fsl,imx7d-pcie",  .data = (void *)IMX7D,  },
	{},
};

static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name	= "imx6q-pcie",
		.of_match_table = imx6_pcie_of_match,
		/* no remove() — forbid manual unbind via sysfs */
		.suppress_bind_attrs = true,
		.pm = &imx6_pcie_pm_ops,
	},
	.probe    = imx6_pcie_probe,
	.shutdown = imx6_pcie_shutdown,
};

/*
 * Module entry: install the ARM abort fixup once, then register the
 * platform driver.  Uses device_initcall (not module_platform_driver)
 * so the fault hook is installed exactly once at boot.
 */
static int __init imx6_pcie_init(void)
{
	/*
	 * Since probe() can be deferred we need to make sure that
	 * hook_fault_code is not called after __init memory is freed
	 * by kernel and since imx6q_pcie_abort_handler() is a no-op,
	 * we can install the handler here without risking it
	 * accessing some uninitialized driver state.
	 */
	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
			"external abort on non-linefetch");

	return platform_driver_register(&imx6_pcie_driver);
}
device_initcall(imx6_pcie_init);