pcie-designware.c 22.9 KB
Newer Older
1
/*
2
 * Synopsys Designware PCIe host controller driver
3 4 5 6 7 8 9 10 11 12 13
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

J
Jingoo Han 已提交
14 15
#include <linux/irq.h>
#include <linux/irqdomain.h>
16
#include <linux/kernel.h>
J
Jingoo Han 已提交
17
#include <linux/msi.h>
18
#include <linux/of_address.h>
19
#include <linux/of_pci.h>
20 21
#include <linux/pci.h>
#include <linux/pci_regs.h>
22
#include <linux/platform_device.h>
23
#include <linux/types.h>
24
#include <linux/delay.h>
25

26
#include "pcie-designware.h"
27

28 29 30 31 32
/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES		10
#define LINK_WAIT_USLEEP_MIN		90000
#define LINK_WAIT_USLEEP_MAX		100000

33 34 35 36 37 38
/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_MAX_IATU_RETRIES	5
#define LINK_WAIT_IATU_MIN		9000
#define LINK_WAIT_IATU_MAX		10000

/* Synopsys-specific PCIe configuration registers */
39 40
#define PCIE_PORT_LINK_CONTROL		0x710
#define PORT_LINK_MODE_MASK		(0x3f << 16)
41 42
#define PORT_LINK_MODE_1_LANES		(0x1 << 16)
#define PORT_LINK_MODE_2_LANES		(0x3 << 16)
43
#define PORT_LINK_MODE_4_LANES		(0x7 << 16)
44
#define PORT_LINK_MODE_8_LANES		(0xf << 16)
45 46 47

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)
48
#define PORT_LOGIC_LINK_WIDTH_MASK	(0x1f << 8)
49 50 51
#define PORT_LOGIC_LINK_WIDTH_1_LANES	(0x1 << 8)
#define PORT_LOGIC_LINK_WIDTH_2_LANES	(0x2 << 8)
#define PORT_LOGIC_LINK_WIDTH_4_LANES	(0x4 << 8)
52
#define PORT_LOGIC_LINK_WIDTH_8_LANES	(0x8 << 8)
53 54 55 56 57 58 59 60 61 62

#define PCIE_MSI_ADDR_LO		0x820
#define PCIE_MSI_ADDR_HI		0x824
#define PCIE_MSI_INTR0_ENABLE		0x828
#define PCIE_MSI_INTR0_MASK		0x82C
#define PCIE_MSI_INTR0_STATUS		0x830

#define PCIE_ATU_VIEWPORT		0x900
#define PCIE_ATU_REGION_INBOUND		(0x1 << 31)
#define PCIE_ATU_REGION_OUTBOUND	(0x0 << 31)
63
#define PCIE_ATU_REGION_INDEX2		(0x2 << 0)
64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82
#define PCIE_ATU_REGION_INDEX1		(0x1 << 0)
#define PCIE_ATU_REGION_INDEX0		(0x0 << 0)
#define PCIE_ATU_CR1			0x904
#define PCIE_ATU_TYPE_MEM		(0x0 << 0)
#define PCIE_ATU_TYPE_IO		(0x2 << 0)
#define PCIE_ATU_TYPE_CFG0		(0x4 << 0)
#define PCIE_ATU_TYPE_CFG1		(0x5 << 0)
#define PCIE_ATU_CR2			0x908
#define PCIE_ATU_ENABLE			(0x1 << 31)
#define PCIE_ATU_BAR_MODE_ENABLE	(0x1 << 30)
#define PCIE_ATU_LOWER_BASE		0x90C
#define PCIE_ATU_UPPER_BASE		0x910
#define PCIE_ATU_LIMIT			0x914
#define PCIE_ATU_LOWER_TARGET		0x918
#define PCIE_ATU_BUS(x)			(((x) & 0xff) << 24)
#define PCIE_ATU_DEV(x)			(((x) & 0x1f) << 19)
#define PCIE_ATU_FUNC(x)		(((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET		0x91C

83 84 85 86 87 88 89 90 91 92 93 94 95 96 97
/*
 * iATU Unroll-specific register definitions
 * From 4.80 core version the address translation will be made by unroll
 */
#define PCIE_ATU_UNR_REGION_CTRL1	0x00
#define PCIE_ATU_UNR_REGION_CTRL2	0x04
#define PCIE_ATU_UNR_LOWER_BASE		0x08
#define PCIE_ATU_UNR_UPPER_BASE		0x0C
#define PCIE_ATU_UNR_LIMIT		0x10
#define PCIE_ATU_UNR_LOWER_TARGET	0x14
#define PCIE_ATU_UNR_UPPER_TARGET	0x18

/* Register address builder */
#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region)  ((0x3 << 20) | (region << 9))

98 99 100
/* PCIe Port Logic registers */
#define PLR_OFFSET			0x700
#define PCIE_PHY_DEBUG_R1		(PLR_OFFSET + 0x2c)
101 102
#define PCIE_PHY_DEBUG_R1_LINK_UP	(0x1 << 4)
#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING	(0x1 << 29)
103

104
static struct pci_ops dw_pcie_ops;
105

106
/*
 * Read a naturally-aligned value of @size bytes from config space at
 * @addr into *@val.  Misaligned or unsupported sizes zero *@val and
 * return PCIBIOS_BAD_REGISTER_NUMBER.
 */
int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val)
{
	if ((uintptr_t)addr & (size - 1)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	switch (size) {
	case 4:
		*val = readl(addr);
		break;
	case 2:
		*val = readw(addr);
		break;
	case 1:
		*val = readb(addr);
		break;
	default:
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

127
/*
 * Write a naturally-aligned value of @size bytes to config space at
 * @addr.  Misaligned or unsupported sizes return
 * PCIBIOS_BAD_REGISTER_NUMBER without writing.
 */
int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
{
	if ((uintptr_t)addr & (size - 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	switch (size) {
	case 4:
		writel(val, addr);
		break;
	case 2:
		writew(val, addr);
		break;
	case 1:
		writeb(val, addr);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

144
/* Read a root complex (DBI) register, honoring a platform override. */
static inline u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
{
	u32 val;

	if (pp->ops->readl_rc)
		val = pp->ops->readl_rc(pp, reg);
	else
		val = readl(pp->dbi_base + reg);

	return val;
}

152
/* Write a root complex (DBI) register, honoring a platform override. */
static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
{
	if (pp->ops->writel_rc) {
		pp->ops->writel_rc(pp, val, reg);
		return;
	}

	writel(val, pp->dbi_base + reg);
}

160 161 162 163
/* Read an iATU unroll register belonging to outbound region @index. */
static inline u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
{
	return dw_pcie_readl_rc(pp,
			PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index) + reg);
}

/* Write an iATU unroll register belonging to outbound region @index. */
static inline void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index,
					 u32 val, u32 reg)
{
	dw_pcie_writel_rc(pp, val,
			PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index) + reg);
}

175 176
/* Read the RC's own config space, with optional platform override. */
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	if (!pp->ops->rd_own_conf)
		return dw_pcie_cfg_read(pp->dbi_base + where, size, val);

	return pp->ops->rd_own_conf(pp, where, size, val);
}

184 185
/* Write the RC's own config space, with optional platform override. */
static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	if (!pp->ops->wr_own_conf)
		return dw_pcie_cfg_write(pp->dbi_base + where, size, val);

	return pp->ops->wr_own_conf(pp, where, size, val);
}

193 194 195
/*
 * Program outbound iATU region @index so that CPU accesses in
 * [cpu_addr, cpu_addr + size) are translated to @pci_addr with TLP
 * @type, then poll until the hardware reports the region enabled.
 * The enable write (CTRL2/CR2) is intentionally last.
 */
static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
		int type, u64 cpu_addr, u64 pci_addr, u32 size)
{
	u32 attempt, ctrl2;

	if (pp->iatu_unroll_enabled) {
		dw_pcie_writel_unroll(pp, index, lower_32_bits(cpu_addr),
				      PCIE_ATU_UNR_LOWER_BASE);
		dw_pcie_writel_unroll(pp, index, upper_32_bits(cpu_addr),
				      PCIE_ATU_UNR_UPPER_BASE);
		dw_pcie_writel_unroll(pp, index,
				      lower_32_bits(cpu_addr + size - 1),
				      PCIE_ATU_UNR_LIMIT);
		dw_pcie_writel_unroll(pp, index, lower_32_bits(pci_addr),
				      PCIE_ATU_UNR_LOWER_TARGET);
		dw_pcie_writel_unroll(pp, index, upper_32_bits(pci_addr),
				      PCIE_ATU_UNR_UPPER_TARGET);
		dw_pcie_writel_unroll(pp, index, type,
				      PCIE_ATU_UNR_REGION_CTRL1);
		dw_pcie_writel_unroll(pp, index, PCIE_ATU_ENABLE,
				      PCIE_ATU_UNR_REGION_CTRL2);
	} else {
		/* Select the region through the shared viewport first */
		dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
				  PCIE_ATU_VIEWPORT);
		dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr),
				  PCIE_ATU_LOWER_BASE);
		dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr),
				  PCIE_ATU_UPPER_BASE);
		dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
				  PCIE_ATU_LIMIT);
		dw_pcie_writel_rc(pp, lower_32_bits(pci_addr),
				  PCIE_ATU_LOWER_TARGET);
		dw_pcie_writel_rc(pp, upper_32_bits(pci_addr),
				  PCIE_ATU_UPPER_TARGET);
		dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
		dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
	}

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (attempt = 0; attempt < LINK_WAIT_MAX_IATU_RETRIES; attempt++) {
		if (pp->iatu_unroll_enabled)
			ctrl2 = dw_pcie_readl_unroll(pp, index,
						     PCIE_ATU_UNR_REGION_CTRL2);
		else
			ctrl2 = dw_pcie_readl_rc(pp, PCIE_ATU_CR2);

		if (ctrl2 == PCIE_ATU_ENABLE)
			return;

		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
	}

	dev_err(pp->dev, "iATU is not being enabled\n");
}

J
Jingoo Han 已提交
249 250
/* irq_chip for MSI vectors; delegates mask/unmask to the PCI MSI core */
static struct irq_chip dw_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

/* MSI int handler */
258
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
J
Jingoo Han 已提交
259 260
{
	unsigned long val;
261
	int i, pos, irq;
262
	irqreturn_t ret = IRQ_NONE;
J
Jingoo Han 已提交
263 264 265 266 267

	for (i = 0; i < MAX_MSI_CTRLS; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
				(u32 *)&val);
		if (val) {
268
			ret = IRQ_HANDLED;
J
Jingoo Han 已提交
269 270
			pos = 0;
			while ((pos = find_next_bit(&val, 32, pos)) != 32) {
271 272
				irq = irq_find_mapping(pp->irq_domain,
						i * 32 + pos);
H
Harro Haan 已提交
273 274 275
				dw_pcie_wr_own_conf(pp,
						PCIE_MSI_INTR0_STATUS + i * 12,
						4, 1 << pos);
276
				generic_handle_irq(irq);
J
Jingoo Han 已提交
277 278 279 280
				pos++;
			}
		}
	}
281 282

	return ret;
J
Jingoo Han 已提交
283 284 285 286
}

void dw_pcie_msi_init(struct pcie_port *pp)
{
287 288
	u64 msi_target;

J
Jingoo Han 已提交
289
	pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
290
	msi_target = virt_to_phys((void *)pp->msi_data);
J
Jingoo Han 已提交
291 292 293

	/* program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
294 295 296
			    (u32)(msi_target & 0xffffffff));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    (u32)(msi_target >> 32 & 0xffffffff));
J
Jingoo Han 已提交
297 298
}

299 300 301 302 303 304 305 306 307 308 309
/* Disable MSI vector @irq in its controller's INTR0_ENABLE register. */
static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	unsigned int bank, bit, val;

	bank = (irq / 32) * 12;		/* 12-byte stride per controller */
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + bank, 4, &val);
	val &= ~(1 << bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + bank, 4, val);
}

310
static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
311
			    unsigned int nvec, unsigned int pos)
312
{
313
	unsigned int i;
314

315
	for (i = 0; i < nvec; i++) {
316
		irq_set_msi_desc_off(irq_base, i, NULL);
317
		/* Disable corresponding interrupt on MSI controller */
318 319 320 321
		if (pp->ops->msi_clear_irq)
			pp->ops->msi_clear_irq(pp, pos + i);
		else
			dw_pcie_msi_clear_irq(pp, pos + i);
322
	}
323 324

	bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
325 326
}

327 328 329 330 331 332 333 334 335 336 337
/* Enable MSI vector @irq in its controller's INTR0_ENABLE register. */
static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	unsigned int bank, bit, val;

	bank = (irq / 32) * 12;		/* 12-byte stride per controller */
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + bank, 4, &val);
	val |= 1 << bit;
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + bank, 4, val);
}

J
Jingoo Han 已提交
338 339
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
340
	int irq, pos0, i;
341
	struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(desc);
J
Jingoo Han 已提交
342

343 344 345 346
	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
				       order_base_2(no_irqs));
	if (pos0 < 0)
		goto no_valid_irq;
J
Jingoo Han 已提交
347

348 349
	irq = irq_find_mapping(pp->irq_domain, pos0);
	if (!irq)
J
Jingoo Han 已提交
350 351
		goto no_valid_irq;

352 353 354 355 356 357 358
	/*
	 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
	 * descs so there is no need to allocate descs here. We can therefore
	 * assume that if irq_find_mapping above returns non-zero, then the
	 * descs are also successfully allocated.
	 */

359
	for (i = 0; i < no_irqs; i++) {
360 361 362 363
		if (irq_set_msi_desc_off(irq, i, desc) != 0) {
			clear_irq_range(pp, irq, i, pos0);
			goto no_valid_irq;
		}
J
Jingoo Han 已提交
364
		/*Enable corresponding interrupt in MSI interrupt controller */
365 366 367 368
		if (pp->ops->msi_set_irq)
			pp->ops->msi_set_irq(pp, pos0 + i);
		else
			dw_pcie_msi_set_irq(pp, pos0 + i);
J
Jingoo Han 已提交
369 370 371
	}

	*pos = pos0;
372 373 374
	desc->nvec_used = no_irqs;
	desc->msi_attrib.multiple = order_base_2(no_irqs);

J
Jingoo Han 已提交
375 376 377 378 379 380 381
	return irq;

no_valid_irq:
	*pos = pos0;
	return -ENOSPC;
}

382
/* Compose and write the MSI address/data message for @irq (hwirq @pos). */
static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
{
	struct msi_msg msg;
	u64 msi_target;

	if (pp->ops->get_msi_addr)
		msi_target = pp->ops->get_msi_addr(pp);
	else
		msi_target = virt_to_phys((void *)pp->msi_data);

	msg.address_lo = lower_32_bits(msi_target);
	msg.address_hi = upper_32_bits(msi_target);

	/* by default the data payload is simply the hwirq number */
	if (pp->ops->get_msi_data)
		msg.data = pp->ops->get_msi_data(pp, pos);
	else
		msg.data = pos;

	pci_write_msi_msg(irq, &msg);
}

static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			struct msi_desc *desc)
{
	int irq, pos;
407
	struct pcie_port *pp = pdev->bus->sysdata;
408 409 410 411 412 413 414 415 416

	if (desc->msi_attrib.is_msix)
		return -EINVAL;

	irq = assign_irq(1, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);
J
Jingoo Han 已提交
417 418 419 420

	return 0;
}

421 422 423 424 425 426
/* msi_controller ->setup_irqs: allocate @nvec multi-MSI vectors at once. */
static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
			     int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
	struct pcie_port *pp = pdev->bus->sysdata;
	struct msi_desc *desc;
	int irq, pos;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	/* multi-MSI uses a single descriptor for the whole block */
	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	irq = assign_irq(nvec, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
#else
	return -EINVAL;
#endif
}

448
/* msi_controller ->teardown_irq: release a single MSI vector. */
static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct msi_desc *msi = irq_data_get_msi_desc(data);
	struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);

	/* data->hwirq is the vector's position in the enable bitmap */
	clear_irq_range(pp, irq, 1, data->hwirq);
}

457
/* msi_controller glue exposing the DesignWare MSI allocation hooks */
static struct msi_controller dw_pcie_msi_chip = {
	.setup_irq = dw_msi_setup_irq,
	.setup_irqs = dw_msi_setup_irqs,
	.teardown_irq = dw_msi_teardown_irq,
};

463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480
/*
 * Poll for link-up, sleeping between attempts.  Returns 0 once the link
 * is up, -ETIMEDOUT after LINK_WAIT_MAX_RETRIES attempts.
 */
int dw_pcie_wait_for_link(struct pcie_port *pp)
{
	int attempt;

	/* check if the link is up or not */
	for (attempt = 0; attempt < LINK_WAIT_MAX_RETRIES; attempt++) {
		if (dw_pcie_link_up(pp)) {
			dev_info(pp->dev, "link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	dev_err(pp->dev, "phy link never came up\n");

	return -ETIMEDOUT;
}

481 482
int dw_pcie_link_up(struct pcie_port *pp)
{
483 484
	u32 val;

485 486
	if (pp->ops->link_up)
		return pp->ops->link_up(pp);
487

488
	val = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
489 490
	return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) &&
		(!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)));
491 492
}

J
Jingoo Han 已提交
493 494 495 496 497 498 499 500 501 502 503 504 505
/* irq_domain ->map: bind each MSI hwirq to the MSI irq_chip. */
static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = dw_pcie_msi_map,
};

506 507 508 509 510 511 512 513 514 515 516
/*
 * Detect iATU unroll support: on cores using the unrolled iATU address
 * space, the legacy viewport register reads back as all ones.
 */
static u8 dw_pcie_iatu_unroll_enabled(struct pcie_port *pp)
{
	return (dw_pcie_readl_rc(pp, PCIE_ATU_VIEWPORT) == 0xffffffff) ? 1 : 0;
}

517
/*
 * Common host-bridge bring-up for DesignWare-based drivers: gather
 * config/I-O/memory/bus resources from DT, map the DBI and config
 * windows, set up the MSI domain, run the platform's host_init hook,
 * then scan, size, and add the PCI bus.  Returns 0 or a negative errno.
 */
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct device_node *np = pp->dev->of_node;
	struct platform_device *pdev = to_platform_device(pp->dev);
	struct pci_bus *bus, *child;
	struct resource *cfg_res;
	int i, ret;
	LIST_HEAD(res);
	struct resource_entry *win, *tmp;

	/* "config" reg: one window split evenly into CFG0/CFG1 halves */
	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res)/2;
		pp->cfg1_size = resource_size(cfg_res)/2;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		/* NOTE: only a warning; a "config" range may still arrive below */
		dev_err(pp->dev, "missing *config* reg space\n");
	}

	ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
	if (ret)
		return ret;

	ret = devm_request_pci_bus_resources(&pdev->dev, &res);
	if (ret)
		goto error;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp, &res) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			ret = pci_remap_iospace(win->res, pp->io_base);
			if (ret) {
				dev_warn(pp->dev, "error %d: failed to map resource %pR\n",
					 ret, win->res);
				resource_list_destroy_entry(win);
			} else {
				pp->io = win->res;
				pp->io->name = "I/O";
				pp->io_size = resource_size(pp->io);
				pp->io_bus_addr = pp->io->start - win->offset;
			}
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			/* typeless entry: config space described in ranges */
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg)/2;
			pp->cfg1_size = resource_size(pp->cfg)/2;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		}
	}

	/* map DBI registers unless the platform mapped them already */
	if (!pp->dbi_base) {
		pp->dbi_base = devm_ioremap(pp->dev, pp->cfg->start,
					resource_size(pp->cfg));
		if (!pp->dbi_base) {
			dev_err(pp->dev, "error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
						pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(pp->dev, "error with ioremap in function\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
						pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(pp->dev, "error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	ret = of_property_read_u32(np, "num-lanes", &pp->lanes);
	if (ret)
		pp->lanes = 0;	/* 0 = leave hardware default */

	ret = of_property_read_u32(np, "num-viewport", &pp->num_viewport);
	if (ret)
		pp->num_viewport = 2;	/* conservative default */

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (!pp->ops->msi_host_init) {
			pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
						MAX_MSI_IRQS, &msi_domain_ops,
						&dw_pcie_msi_chip);
			if (!pp->irq_domain) {
				dev_err(pp->dev, "irq domain init failed\n");
				ret = -ENXIO;
				goto error;
			}

			/* pre-create all mappings; see comment in assign_irq() */
			for (i = 0; i < MAX_MSI_IRQS; i++)
				irq_create_mapping(pp->irq_domain, i);
		} else {
			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
			if (ret < 0)
				goto error;
		}
	}

	pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);

	if (pp->ops->host_init)
		pp->ops->host_init(pp);

	pp->root_bus_nr = pp->busn->start;
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		bus = pci_scan_root_bus_msi(pp->dev, pp->root_bus_nr,
					    &dw_pcie_ops, pp, &res,
					    &dw_pcie_msi_chip);
		dw_pcie_msi_chip.dev = pp->dev;
	} else
		bus = pci_scan_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops,
					pp, &res);
	if (!bus) {
		ret = -ENOMEM;
		goto error;
	}

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

#ifdef CONFIG_ARM
	/* support old dtbs that incorrectly describe IRQs */
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
#endif

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);
	return 0;

error:
	pci_free_resource_list(&res);
	return ret;
}

/*
 * Read config space of a device below the root port.  Outbound iATU
 * region 1 is retargeted at the device's config space for the access;
 * when only two viewports exist, the region is restored to I/O
 * afterwards because region 1 doubles as the I/O window.
 */
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
		u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;

	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number != pp->root_bus_nr) {
		/* device behind a further bridge: type 1 config request */
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	} else {
		/* device directly below the root port: type 0 request */
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	}

	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_cfg_read(va_cfg_base + where, size, val);
	if (pp->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

719
/*
 * Write config space of a device below the root port.  Mirrors
 * dw_pcie_rd_other_conf(): outbound iATU region 1 is retargeted for the
 * access and restored to I/O when only two viewports exist.
 */
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
		u32 devfn, int where, int size, u32 val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;

	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number != pp->root_bus_nr) {
		/* device behind a further bridge: type 1 config request */
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	} else {
		/* device directly below the root port: type 0 request */
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	}

	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_cfg_write(va_cfg_base + where, size, val);
	if (pp->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

757 758
static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
				int dev)
759 760 761
{
	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
762
		if (!dw_pcie_link_up(pp))
763 764 765 766 767 768 769 770 771 772
			return 0;
	}

	/* access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}

773
/* pci_ops ->read entry point for all buses under this root complex. */
static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			int size, u32 *val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
		*val = 0xffffffff;	/* master abort: all ones */
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number != pp->root_bus_nr)
		return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);

	return dw_pcie_rd_own_conf(pp, where, size, val);
}

789
/* pci_ops ->write entry point for all buses under this root complex. */
static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			int where, int size, u32 val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number != pp->root_bus_nr)
		return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);

	return dw_pcie_wr_own_conf(pp, where, size, val);
}

803 804 805
/* pci_ops for config accesses on buses below this host bridge */
static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};

808
/*
 * Program the root complex: lane count, link width, RC BARs, interrupt
 * pin, bus numbers, command register, default outbound iATU regions,
 * bridge class code, and finally request a link speed change.
 */
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val;

	/* set the number of lanes */
	val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_MODE_MASK;
	switch (pp->lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes);
		return;
	}
	dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);

	/* set link width speed control register */
	/* no default case: invalid lane counts already returned above */
	val = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (pp->lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);

	/* setup RC BARs */
	dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
	dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);

	/* setup interrupt pins: interrupt pin byte = 1 (INTA) */
	val = dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);

	/* setup bus numbers: primary 0, secondary 1, subordinate 1 */
	val = dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00010100;
	dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);

	/* setup command register */
	val = dw_pcie_readl_rc(pp, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_rc(pp, val, PCI_COMMAND);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		/* dedicated I/O region only when >2 viewports exist */
		if (pp->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}