pcie-designware.c 23.1 KB
Newer Older
1
/*
2
 * Synopsys Designware PCIe host controller driver
3 4 5 6 7 8 9 10 11 12 13
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

J
Jingoo Han 已提交
14 15
#include <linux/irq.h>
#include <linux/irqdomain.h>
16
#include <linux/kernel.h>
J
Jingoo Han 已提交
17
#include <linux/msi.h>
18
#include <linux/of_address.h>
19
#include <linux/of_pci.h>
20 21
#include <linux/pci.h>
#include <linux/pci_regs.h>
22
#include <linux/platform_device.h>
23
#include <linux/types.h>
24
#include <linux/delay.h>
25

26
#include "pcie-designware.h"
27

28 29 30 31 32
/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES		10
#define LINK_WAIT_USLEEP_MIN		90000
#define LINK_WAIT_USLEEP_MAX		100000

33 34 35 36 37 38
/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_MAX_IATU_RETRIES	5
#define LINK_WAIT_IATU_MIN		9000
#define LINK_WAIT_IATU_MAX		10000

/* Synopsys-specific PCIe configuration registers */
39 40
#define PCIE_PORT_LINK_CONTROL		0x710
#define PORT_LINK_MODE_MASK		(0x3f << 16)
41 42
#define PORT_LINK_MODE_1_LANES		(0x1 << 16)
#define PORT_LINK_MODE_2_LANES		(0x3 << 16)
43
#define PORT_LINK_MODE_4_LANES		(0x7 << 16)
44
#define PORT_LINK_MODE_8_LANES		(0xf << 16)
45 46 47

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)
48
#define PORT_LOGIC_LINK_WIDTH_MASK	(0x1f << 8)
49 50 51
#define PORT_LOGIC_LINK_WIDTH_1_LANES	(0x1 << 8)
#define PORT_LOGIC_LINK_WIDTH_2_LANES	(0x2 << 8)
#define PORT_LOGIC_LINK_WIDTH_4_LANES	(0x4 << 8)
52
#define PORT_LOGIC_LINK_WIDTH_8_LANES	(0x8 << 8)
53 54 55 56 57 58 59 60 61 62

#define PCIE_MSI_ADDR_LO		0x820
#define PCIE_MSI_ADDR_HI		0x824
#define PCIE_MSI_INTR0_ENABLE		0x828
#define PCIE_MSI_INTR0_MASK		0x82C
#define PCIE_MSI_INTR0_STATUS		0x830

#define PCIE_ATU_VIEWPORT		0x900
#define PCIE_ATU_REGION_INBOUND		(0x1 << 31)
#define PCIE_ATU_REGION_OUTBOUND	(0x0 << 31)
63
#define PCIE_ATU_REGION_INDEX2		(0x2 << 0)
64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82
#define PCIE_ATU_REGION_INDEX1		(0x1 << 0)
#define PCIE_ATU_REGION_INDEX0		(0x0 << 0)
#define PCIE_ATU_CR1			0x904
#define PCIE_ATU_TYPE_MEM		(0x0 << 0)
#define PCIE_ATU_TYPE_IO		(0x2 << 0)
#define PCIE_ATU_TYPE_CFG0		(0x4 << 0)
#define PCIE_ATU_TYPE_CFG1		(0x5 << 0)
#define PCIE_ATU_CR2			0x908
#define PCIE_ATU_ENABLE			(0x1 << 31)
#define PCIE_ATU_BAR_MODE_ENABLE	(0x1 << 30)
#define PCIE_ATU_LOWER_BASE		0x90C
#define PCIE_ATU_UPPER_BASE		0x910
#define PCIE_ATU_LIMIT			0x914
#define PCIE_ATU_LOWER_TARGET		0x918
#define PCIE_ATU_BUS(x)			(((x) & 0xff) << 24)
#define PCIE_ATU_DEV(x)			(((x) & 0x1f) << 19)
#define PCIE_ATU_FUNC(x)		(((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET		0x91C

83 84 85 86 87 88 89 90 91 92 93 94 95 96 97
/*
 * iATU Unroll-specific register definitions
 * From 4.80 core version the address translation will be made by unroll
 */
#define PCIE_ATU_UNR_REGION_CTRL1	0x00
#define PCIE_ATU_UNR_REGION_CTRL2	0x04
#define PCIE_ATU_UNR_LOWER_BASE		0x08
#define PCIE_ATU_UNR_UPPER_BASE		0x0C
#define PCIE_ATU_UNR_LIMIT		0x10
#define PCIE_ATU_UNR_LOWER_TARGET	0x14
#define PCIE_ATU_UNR_UPPER_TARGET	0x18

/* Register address builder */
#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region)  ((0x3 << 20) | (region << 9))

98 99 100
/* PCIe Port Logic registers */
#define PLR_OFFSET			0x700
#define PCIE_PHY_DEBUG_R1		(PLR_OFFSET + 0x2c)
101 102
#define PCIE_PHY_DEBUG_R1_LINK_UP	(0x1 << 4)
#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING	(0x1 << 29)
103

104
static struct pci_ops dw_pcie_ops;
105

106
/*
 * Read a 1-, 2- or 4-byte value from a memory-mapped configuration-space
 * address.  Returns PCIBIOS_SUCCESSFUL, or PCIBIOS_BAD_REGISTER_NUMBER for
 * a misaligned address or unsupported access size (in which case *val is 0).
 */
int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val)
{
	/* Accesses must be naturally aligned for their size */
	if ((uintptr_t)addr & (size - 1)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	switch (size) {
	case 4:
		*val = readl(addr);
		break;
	case 2:
		*val = readw(addr);
		break;
	case 1:
		*val = readb(addr);
		break;
	default:
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

127
/*
 * Write a 1-, 2- or 4-byte value to a memory-mapped configuration-space
 * address.  Returns PCIBIOS_SUCCESSFUL, or PCIBIOS_BAD_REGISTER_NUMBER for
 * a misaligned address or unsupported access size.
 */
int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
{
	/* Accesses must be naturally aligned for their size */
	if ((uintptr_t)addr & (size - 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	switch (size) {
	case 4:
		writel(val, addr);
		break;
	case 2:
		writew(val, addr);
		break;
	case 1:
		writeb(val, addr);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

144
/* Read a root-complex (DBI) register, honoring a platform override if set. */
static inline u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
{
	void __iomem *addr = pp->dbi_base + reg;

	if (pp->ops->readl_rc)
		return pp->ops->readl_rc(pp, addr);

	return readl(addr);
}

152
/* Write a root-complex (DBI) register, honoring a platform override if set. */
static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
{
	void __iomem *addr = pp->dbi_base + reg;

	if (pp->ops->writel_rc) {
		pp->ops->writel_rc(pp, val, addr);
		return;
	}

	writel(val, addr);
}

160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180
/* Read register @reg of unrolled-iATU outbound region @index. */
static inline u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
{
	void __iomem *addr = pp->dbi_base +
			     PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index) + reg;

	if (pp->ops->readl_rc)
		return pp->ops->readl_rc(pp, addr);

	return readl(addr);
}

/* Write register @reg of unrolled-iATU outbound region @index. */
static inline void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index,
					 u32 val, u32 reg)
{
	void __iomem *addr = pp->dbi_base +
			     PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index) + reg;

	if (pp->ops->writel_rc) {
		pp->ops->writel_rc(pp, val, addr);
		return;
	}

	writel(val, addr);
}

181 182
/* Config read of the root complex's own space; platforms may override. */
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	if (!pp->ops->rd_own_conf)
		return dw_pcie_cfg_read(pp->dbi_base + where, size, val);

	return pp->ops->rd_own_conf(pp, where, size, val);
}

190 191
/* Config write of the root complex's own space; platforms may override. */
static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	if (!pp->ops->wr_own_conf)
		return dw_pcie_cfg_write(pp->dbi_base + where, size, val);

	return pp->ops->wr_own_conf(pp, where, size, val);
}

199 200 201
/*
 * Program outbound iATU region @index to translate CPU addresses
 * [cpu_addr, cpu_addr + size - 1] to PCI address @pci_addr with TLP type
 * @type.  Cores >= 4.80 use the "unrolled" per-region register space;
 * older cores are programmed through the shared viewport registers.
 * After enabling, the region is polled until the enable bit reads back.
 */
static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
		int type, u64 cpu_addr, u64 pci_addr, u32 size)
{
	u32 retries, val;

	if (pp->iatu_unroll_enabled) {
		dw_pcie_writel_unroll(pp, index,
			lower_32_bits(cpu_addr), PCIE_ATU_UNR_LOWER_BASE);
		dw_pcie_writel_unroll(pp, index,
			upper_32_bits(cpu_addr), PCIE_ATU_UNR_UPPER_BASE);
		dw_pcie_writel_unroll(pp, index,
			lower_32_bits(cpu_addr + size - 1), PCIE_ATU_UNR_LIMIT);
		dw_pcie_writel_unroll(pp, index,
			lower_32_bits(pci_addr), PCIE_ATU_UNR_LOWER_TARGET);
		dw_pcie_writel_unroll(pp, index,
			upper_32_bits(pci_addr), PCIE_ATU_UNR_UPPER_TARGET);
		dw_pcie_writel_unroll(pp, index,
			type, PCIE_ATU_UNR_REGION_CTRL1);
		/* Enable last, after the translation window is fully set up */
		dw_pcie_writel_unroll(pp, index,
			PCIE_ATU_ENABLE, PCIE_ATU_UNR_REGION_CTRL2);
	} else {
		/* Select the outbound region first; all later writes go
		 * through the shared viewport. */
		dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
						PCIE_ATU_VIEWPORT);
		dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr),
						PCIE_ATU_LOWER_BASE);
		dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr),
						PCIE_ATU_UPPER_BASE);
		dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
						PCIE_ATU_LIMIT);
		dw_pcie_writel_rc(pp, lower_32_bits(pci_addr),
						PCIE_ATU_LOWER_TARGET);
		dw_pcie_writel_rc(pp, upper_32_bits(pci_addr),
						PCIE_ATU_UPPER_TARGET);
		dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
		dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
	}

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		if (pp->iatu_unroll_enabled)
			val = dw_pcie_readl_unroll(pp, index,
						   PCIE_ATU_UNR_REGION_CTRL2);
		else
			val = dw_pcie_readl_rc(pp, PCIE_ATU_CR2);

		if (val == PCIE_ATU_ENABLE)
			return;

		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
	}
	dev_err(pp->dev, "iATU is not being enabled\n");
}

J
Jingoo Han 已提交
255 256
/*
 * irq_chip shared by all DesignWare MSI vectors; mask/unmask are delegated
 * to the generic PCI MSI helpers.
 */
static struct irq_chip dw_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

/* MSI int handler */
264
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
J
Jingoo Han 已提交
265 266
{
	unsigned long val;
267
	int i, pos, irq;
268
	irqreturn_t ret = IRQ_NONE;
J
Jingoo Han 已提交
269 270 271 272 273

	for (i = 0; i < MAX_MSI_CTRLS; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
				(u32 *)&val);
		if (val) {
274
			ret = IRQ_HANDLED;
J
Jingoo Han 已提交
275 276
			pos = 0;
			while ((pos = find_next_bit(&val, 32, pos)) != 32) {
277 278
				irq = irq_find_mapping(pp->irq_domain,
						i * 32 + pos);
H
Harro Haan 已提交
279 280 281
				dw_pcie_wr_own_conf(pp,
						PCIE_MSI_INTR0_STATUS + i * 12,
						4, 1 << pos);
282
				generic_handle_irq(irq);
J
Jingoo Han 已提交
283 284 285 286
				pos++;
			}
		}
	}
287 288

	return ret;
J
Jingoo Han 已提交
289 290 291 292
}

void dw_pcie_msi_init(struct pcie_port *pp)
{
293 294
	u64 msi_target;

J
Jingoo Han 已提交
295
	pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
296
	msi_target = virt_to_phys((void *)pp->msi_data);
J
Jingoo Han 已提交
297 298 299

	/* program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
300 301 302
			    (u32)(msi_target & 0xffffffff));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    (u32)(msi_target >> 32 & 0xffffffff));
J
Jingoo Han 已提交
303 304
}

305 306 307 308 309 310 311 312 313 314 315
/* Clear the enable bit for MSI vector @irq in its INTR0_ENABLE bank. */
static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	unsigned int bank, bit, val;

	bank = (irq / 32) * 12;	/* 12-byte register stride per MSI controller */
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + bank, 4, &val);
	val &= ~(1u << bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + bank, 4, val);
}

316
static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
317
			    unsigned int nvec, unsigned int pos)
318
{
319
	unsigned int i;
320

321
	for (i = 0; i < nvec; i++) {
322
		irq_set_msi_desc_off(irq_base, i, NULL);
323
		/* Disable corresponding interrupt on MSI controller */
324 325 326 327
		if (pp->ops->msi_clear_irq)
			pp->ops->msi_clear_irq(pp, pos + i);
		else
			dw_pcie_msi_clear_irq(pp, pos + i);
328
	}
329 330

	bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
331 332
}

333 334 335 336 337 338 339 340 341 342 343
/* Set the enable bit for MSI vector @irq in its INTR0_ENABLE bank. */
static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	unsigned int bank, bit, val;

	bank = (irq / 32) * 12;	/* 12-byte register stride per MSI controller */
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + bank, 4, &val);
	val |= 1u << bit;
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + bank, 4, val);
}

J
Jingoo Han 已提交
344 345
/*
 * Allocate a naturally-aligned, power-of-two-sized run of @no_irqs MSI
 * vectors and bind @desc to each one.  On success, returns the Linux irq
 * of the first vector and stores the starting hwirq in *pos; on failure
 * returns -ENOSPC (with *pos left as the bitmap result).
 */
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
	int irq, pos0, i;
	struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(desc);

	/* Reserve a contiguous hwirq range in the in-use bitmap */
	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
				       order_base_2(no_irqs));
	if (pos0 < 0)
		goto no_valid_irq;

	irq = irq_find_mapping(pp->irq_domain, pos0);
	if (!irq)
		goto no_valid_irq;

	/*
	 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
	 * descs so there is no need to allocate descs here. We can therefore
	 * assume that if irq_find_mapping above returns non-zero, then the
	 * descs are also successfully allocated.
	 */

	for (i = 0; i < no_irqs; i++) {
		if (irq_set_msi_desc_off(irq, i, desc) != 0) {
			/* Roll back the i vectors already set up */
			clear_irq_range(pp, irq, i, pos0);
			goto no_valid_irq;
		}
		/*Enable corresponding interrupt in MSI interrupt controller */
		if (pp->ops->msi_set_irq)
			pp->ops->msi_set_irq(pp, pos0 + i);
		else
			dw_pcie_msi_set_irq(pp, pos0 + i);
	}

	*pos = pos0;
	desc->nvec_used = no_irqs;
	desc->msi_attrib.multiple = order_base_2(no_irqs);

	return irq;

no_valid_irq:
	*pos = pos0;
	return -ENOSPC;
}

388
/*
 * Compose and write the MSI message (doorbell address + data) for the
 * vector at hwirq @pos, mapped to Linux irq @irq.
 */
static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
{
	struct msi_msg msg;
	u64 msi_target;

	/* Platform hook wins; otherwise use the page from dw_pcie_msi_init() */
	msi_target = pp->ops->get_msi_addr ? pp->ops->get_msi_addr(pp)
					   : virt_to_phys((void *)pp->msi_data);

	msg.address_lo = (u32)(msi_target & 0xffffffff);
	msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);

	/* Default payload is the hwirq position itself */
	msg.data = pp->ops->get_msi_data ? pp->ops->get_msi_data(pp, pos)
					 : pos;

	pci_write_msi_msg(irq, &msg);
}

static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			struct msi_desc *desc)
{
	int irq, pos;
413
	struct pcie_port *pp = pdev->bus->sysdata;
414 415 416 417 418 419 420 421 422

	if (desc->msi_attrib.is_msix)
		return -EINVAL;

	irq = assign_irq(1, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);
J
Jingoo Han 已提交
423 424 425 426

	return 0;
}

427 428 429 430 431 432
/*
 * msi_controller hook: set up @nvec multi-message MSI vectors for @pdev.
 * MSI-X is rejected; the device is expected to carry exactly one msi_desc
 * covering all vectors.
 */
static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
			     int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
	int irq, pos;
	struct msi_desc *desc;
	struct pcie_port *pp = pdev->bus->sysdata;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	/* Multi-MSI uses a single desc for the whole vector block */
	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	irq = assign_irq(nvec, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
#else
	return -EINVAL;
#endif
}

454
static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
J
Jingoo Han 已提交
455
{
456
	struct irq_data *data = irq_get_irq_data(irq);
457
	struct msi_desc *msi = irq_data_get_msi_desc(data);
458
	struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
459 460

	clear_irq_range(pp, irq, 1, data->hwirq);
J
Jingoo Han 已提交
461 462
}

463
/*
 * msi_controller handed to the PCI core; .dev is filled in by
 * dw_pcie_host_init() before the root bus is scanned.
 */
static struct msi_controller dw_pcie_msi_chip = {
	.setup_irq = dw_msi_setup_irq,
	.setup_irqs = dw_msi_setup_irqs,
	.teardown_irq = dw_msi_teardown_irq,
};

469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486
/*
 * Poll until the PHY link trains, sleeping between attempts.
 * Returns 0 on link-up, -ETIMEDOUT after LINK_WAIT_MAX_RETRIES tries.
 */
int dw_pcie_wait_for_link(struct pcie_port *pp)
{
	int attempt = 0;

	while (attempt++ < LINK_WAIT_MAX_RETRIES) {
		if (dw_pcie_link_up(pp)) {
			dev_info(pp->dev, "link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	dev_err(pp->dev, "phy link never came up\n");

	return -ETIMEDOUT;
}

487 488
int dw_pcie_link_up(struct pcie_port *pp)
{
489 490
	u32 val;

491 492
	if (pp->ops->link_up)
		return pp->ops->link_up(pp);
493

494
	val = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
495 496
	return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) &&
		(!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)));
497 498
}

J
Jingoo Han 已提交
499 500 501 502 503 504 505 506 507 508 509 510 511
/*
 * irq_domain .map callback: attach the DW MSI chip and the simple-irq flow
 * handler to each newly-mapped MSI vector.
 */
static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

/* irq_domain ops for the linear MSI domain created in dw_pcie_host_init(). */
static const struct irq_domain_ops msi_domain_ops = {
	.map = dw_pcie_msi_map,
};

512 513 514 515 516 517 518 519 520 521 522
/*
 * Detect the unrolled iATU register layout: on cores that use it (>= 4.80,
 * per the comment above the PCIE_ATU_UNR_* definitions) the legacy viewport
 * register is absent and reads back as all-ones.
 */
static u8 dw_pcie_iatu_unroll_enabled(struct pcie_port *pp)
{
	return dw_pcie_readl_rc(pp, PCIE_ATU_VIEWPORT) == 0xffffffff ? 1 : 0;
}

523
/*
 * Bring up the host bridge: gather config/IO/MEM/bus resources from DT,
 * map the DBI and config windows, set up the MSI domain, run the platform
 * host_init hook, then scan and enumerate the root bus.
 * Returns 0 on success or a negative errno (resources freed on failure).
 */
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct device_node *np = pp->dev->of_node;
	struct platform_device *pdev = to_platform_device(pp->dev);
	struct pci_bus *bus, *child;
	struct resource *cfg_res;
	int i, ret;
	LIST_HEAD(res);
	struct resource_entry *win, *tmp;

	/* Split the "config" reg space in half: CFG0 then CFG1 */
	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res)/2;
		pp->cfg1_size = resource_size(cfg_res)/2;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		/* Non-fatal: a "config" range may still come from DT below */
		dev_err(pp->dev, "missing *config* reg space\n");
	}

	ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
	if (ret)
		return ret;

	ret = devm_request_pci_bus_resources(&pdev->dev, &res);
	if (ret)
		goto error;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp, &res) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			ret = pci_remap_iospace(win->res, pp->io_base);
			if (ret) {
				/* Drop unmappable I/O windows but keep going */
				dev_warn(pp->dev, "error %d: failed to map resource %pR\n",
					 ret, win->res);
				resource_list_destroy_entry(win);
			} else {
				pp->io = win->res;
				pp->io->name = "I/O";
				pp->io_size = resource_size(pp->io);
				pp->io_bus_addr = pp->io->start - win->offset;
			}
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			/* Untyped DT range: treat as the config space */
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg)/2;
			pp->cfg1_size = resource_size(pp->cfg)/2;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		}
	}

	if (!pp->dbi_base) {
		pp->dbi_base = devm_ioremap(pp->dev, pp->cfg->start,
					resource_size(pp->cfg));
		if (!pp->dbi_base) {
			dev_err(pp->dev, "error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
						pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(pp->dev, "error with ioremap in function\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
						pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(pp->dev, "error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	/* Optional DT tunables with defaults */
	ret = of_property_read_u32(np, "num-lanes", &pp->lanes);
	if (ret)
		pp->lanes = 0;

	ret = of_property_read_u32(np, "num-viewport", &pp->num_viewport);
	if (ret)
		pp->num_viewport = 2;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (!pp->ops->msi_host_init) {
			pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
						MAX_MSI_IRQS, &msi_domain_ops,
						&dw_pcie_msi_chip);
			if (!pp->irq_domain) {
				dev_err(pp->dev, "irq domain init failed\n");
				ret = -ENXIO;
				goto error;
			}

			/* Pre-create all mappings; assign_irq() relies on this */
			for (i = 0; i < MAX_MSI_IRQS; i++)
				irq_create_mapping(pp->irq_domain, i);
		} else {
			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
			if (ret < 0)
				goto error;
		}
	}

	pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);

	if (pp->ops->host_init)
		pp->ops->host_init(pp);

	pp->root_bus_nr = pp->busn->start;
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		bus = pci_scan_root_bus_msi(pp->dev, pp->root_bus_nr,
					    &dw_pcie_ops, pp, &res,
					    &dw_pcie_msi_chip);
		dw_pcie_msi_chip.dev = pp->dev;
	} else
		bus = pci_scan_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops,
					pp, &res);
	if (!bus) {
		ret = -ENOMEM;
		goto error;
	}

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

#ifdef CONFIG_ARM
	/* support old dtbs that incorrectly describe IRQs */
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
#endif

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);
	return 0;

error:
	pci_free_resource_list(&res);
	return ret;
}

/*
 * Config read of a device on a subordinate bus.  Temporarily retargets
 * iATU region 1 at the CFG0 (directly-attached bus) or CFG1 (deeper bus)
 * window, performs the read, then — when only two viewports exist and
 * region 1 is shared with I/O — restores it to the I/O translation.
 */
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
		u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;

	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

	/* Encode bus/device/function as the config TLP target address */
	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_cfg_read(va_cfg_base + where, size, val);
	if (pp->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

725
/*
 * Config write of a device on a subordinate bus.  Mirrors
 * dw_pcie_rd_other_conf(): retarget iATU region 1 at the CFG0/CFG1
 * window, write, then restore the shared region to I/O if only two
 * viewports are available.
 */
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
		u32 devfn, int where, int size, u32 val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;

	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

	/* Encode bus/device/function as the config TLP target address */
	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_cfg_write(va_cfg_base + where, size, val);
	if (pp->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

763
static int dw_pcie_valid_config(struct pcie_port *pp,
764 765 766 767
				struct pci_bus *bus, int dev)
{
	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
768
		if (!dw_pcie_link_up(pp))
769 770 771 772 773 774 775 776 777 778
			return 0;
	}

	/* access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}

779
/* pci_ops .read: dispatch to own- or other-bus config read. */
static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			int size, u32 *val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn))) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number != pp->root_bus_nr)
		return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);

	return dw_pcie_rd_own_conf(pp, where, size, val);
}

795
/* pci_ops .write: dispatch to own- or other-bus config write. */
static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			int where, int size, u32 val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number != pp->root_bus_nr)
		return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);

	return dw_pcie_wr_own_conf(pp, where, size, val);
}

809 810 811
/* pci_ops used for all config accesses behind this root complex. */
static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};

814
/*
 * Program the root-complex registers: lane count, link-width control,
 * RC BARs, interrupt pin, bus numbers, command register, the outbound
 * ATU windows (unless the platform does its own translation), class
 * code, and finally request a link speed change.
 */
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val;

	/* set the number of lanes */
	val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_MODE_MASK;
	switch (pp->lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes);
		return;
	}
	dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);

	/* set link width speed control register */
	val = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (pp->lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);

	/* setup RC BARs */
	dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
	dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);

	/* setup interrupt pins */
	val = dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);

	/* setup bus numbers */
	val = dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00010100;
	dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);

	/* setup command register */
	val = dw_pcie_readl_rc(pp, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_rc(pp, val, PCI_COMMAND);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		/* A dedicated I/O region only exists with >2 viewports;
		 * otherwise region 1 is time-shared by the config accessors */
		if (pp->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);

	/* Request a (re-)train to the highest supported speed */
	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}