/*
2
 * Synopsys Designware PCIe host controller driver
3 4 5 6 7 8 9 10 11 12 13
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

J
Jingoo Han 已提交
14 15
#include <linux/irq.h>
#include <linux/irqdomain.h>
16 17
#include <linux/kernel.h>
#include <linux/module.h>
J
Jingoo Han 已提交
18
#include <linux/msi.h>
19
#include <linux/of_address.h>
20
#include <linux/of_pci.h>
21 22
#include <linux/pci.h>
#include <linux/pci_regs.h>
23
#include <linux/platform_device.h>
24
#include <linux/types.h>
25
#include <linux/delay.h>
26

27
#include "pcie-designware.h"
28

29 30 31 32 33
/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES		10
#define LINK_WAIT_USLEEP_MIN		90000
#define LINK_WAIT_USLEEP_MAX		100000

34 35 36 37 38 39
/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_MAX_IATU_RETRIES	5
#define LINK_WAIT_IATU_MIN		9000
#define LINK_WAIT_IATU_MAX		10000

/* Synopsys-specific PCIe configuration registers */
40 41
#define PCIE_PORT_LINK_CONTROL		0x710
#define PORT_LINK_MODE_MASK		(0x3f << 16)
42 43
#define PORT_LINK_MODE_1_LANES		(0x1 << 16)
#define PORT_LINK_MODE_2_LANES		(0x3 << 16)
44
#define PORT_LINK_MODE_4_LANES		(0x7 << 16)
45
#define PORT_LINK_MODE_8_LANES		(0xf << 16)
46 47 48

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)
49
#define PORT_LOGIC_LINK_WIDTH_MASK	(0x1f << 8)
50 51 52
#define PORT_LOGIC_LINK_WIDTH_1_LANES	(0x1 << 8)
#define PORT_LOGIC_LINK_WIDTH_2_LANES	(0x2 << 8)
#define PORT_LOGIC_LINK_WIDTH_4_LANES	(0x4 << 8)
53
#define PORT_LOGIC_LINK_WIDTH_8_LANES	(0x8 << 8)
54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82

#define PCIE_MSI_ADDR_LO		0x820
#define PCIE_MSI_ADDR_HI		0x824
#define PCIE_MSI_INTR0_ENABLE		0x828
#define PCIE_MSI_INTR0_MASK		0x82C
#define PCIE_MSI_INTR0_STATUS		0x830

#define PCIE_ATU_VIEWPORT		0x900
#define PCIE_ATU_REGION_INBOUND		(0x1 << 31)
#define PCIE_ATU_REGION_OUTBOUND	(0x0 << 31)
#define PCIE_ATU_REGION_INDEX1		(0x1 << 0)
#define PCIE_ATU_REGION_INDEX0		(0x0 << 0)
#define PCIE_ATU_CR1			0x904
#define PCIE_ATU_TYPE_MEM		(0x0 << 0)
#define PCIE_ATU_TYPE_IO		(0x2 << 0)
#define PCIE_ATU_TYPE_CFG0		(0x4 << 0)
#define PCIE_ATU_TYPE_CFG1		(0x5 << 0)
#define PCIE_ATU_CR2			0x908
#define PCIE_ATU_ENABLE			(0x1 << 31)
#define PCIE_ATU_BAR_MODE_ENABLE	(0x1 << 30)
#define PCIE_ATU_LOWER_BASE		0x90C
#define PCIE_ATU_UPPER_BASE		0x910
#define PCIE_ATU_LIMIT			0x914
#define PCIE_ATU_LOWER_TARGET		0x918
#define PCIE_ATU_BUS(x)			(((x) & 0xff) << 24)
#define PCIE_ATU_DEV(x)			(((x) & 0x1f) << 19)
#define PCIE_ATU_FUNC(x)		(((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET		0x91C

83 84 85 86 87 88 89 90 91 92 93 94 95 96 97
/*
 * iATU Unroll-specific register definitions
 * From 4.80 core version the address translation will be made by unroll
 */
#define PCIE_ATU_UNR_REGION_CTRL1	0x00
#define PCIE_ATU_UNR_REGION_CTRL2	0x04
#define PCIE_ATU_UNR_LOWER_BASE		0x08
#define PCIE_ATU_UNR_UPPER_BASE		0x0C
#define PCIE_ATU_UNR_LIMIT		0x10
#define PCIE_ATU_UNR_LOWER_TARGET	0x14
#define PCIE_ATU_UNR_UPPER_TARGET	0x18

/* Register address builder */
#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region)  ((0x3 << 20) | (region << 9))

98 99 100 101 102
/* PCIe Port Logic registers */
#define PLR_OFFSET			0x700
#define PCIE_PHY_DEBUG_R1		(PLR_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_LINK_UP	0x00000010

103
static struct pci_ops dw_pcie_ops;
104

105
int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val)
106
{
107 108 109 110 111
	if ((uintptr_t)addr & (size - 1)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

112 113
	if (size == 4)
		*val = readl(addr);
114
	else if (size == 2)
115
		*val = readw(addr);
116
	else if (size == 1)
117
		*val = readb(addr);
118 119
	else {
		*val = 0;
120
		return PCIBIOS_BAD_REGISTER_NUMBER;
121
	}
122 123 124 125

	return PCIBIOS_SUCCESSFUL;
}

126
int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
127
{
128 129 130
	if ((uintptr_t)addr & (size - 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;

131 132 133
	if (size == 4)
		writel(val, addr);
	else if (size == 2)
134
		writew(val, addr);
135
	else if (size == 1)
136
		writeb(val, addr);
137 138 139 140 141 142
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}

143
static inline u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
144
{
145
	if (pp->ops->readl_rc)
146 147 148
		return pp->ops->readl_rc(pp, pp->dbi_base + reg);

	return readl(pp->dbi_base + reg);
149 150
}

151
static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
152
{
153
	if (pp->ops->writel_rc)
154
		pp->ops->writel_rc(pp, val, pp->dbi_base + reg);
155
	else
156
		writel(val, pp->dbi_base + reg);
157 158
}

159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179
static inline u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	if (pp->ops->readl_rc)
		return pp->ops->readl_rc(pp, pp->dbi_base + offset + reg);

	return readl(pp->dbi_base + offset + reg);
}

/*
 * Write an iATU register for outbound region @index on cores (>= 4.80)
 * that use the "unrolled" iATU layout.
 */
static inline void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index,
					 u32 val, u32 reg)
{
	void __iomem *addr = pp->dbi_base +
			     PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index) + reg;

	if (pp->ops->writel_rc)
		pp->ops->writel_rc(pp, val, addr);
	else
		writel(val, addr);
}

180 181
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
182
{
183
	if (pp->ops->rd_own_conf)
184
		return pp->ops->rd_own_conf(pp, where, size, val);
185

186
	return dw_pcie_cfg_read(pp->dbi_base + where, size, val);
187 188
}

189 190
static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
191
{
192
	if (pp->ops->wr_own_conf)
193
		return pp->ops->wr_own_conf(pp, where, size, val);
194

195
	return dw_pcie_cfg_write(pp->dbi_base + where, size, val);
196 197
}

198 199 200
static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
		int type, u64 cpu_addr, u64 pci_addr, u32 size)
{
201
	u32 retries, val;
202

203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233
	if (pp->iatu_unroll_enabled) {
		dw_pcie_writel_unroll(pp, index,
			lower_32_bits(cpu_addr), PCIE_ATU_UNR_LOWER_BASE);
		dw_pcie_writel_unroll(pp, index,
			upper_32_bits(cpu_addr), PCIE_ATU_UNR_UPPER_BASE);
		dw_pcie_writel_unroll(pp, index,
			lower_32_bits(cpu_addr + size - 1), PCIE_ATU_UNR_LIMIT);
		dw_pcie_writel_unroll(pp, index,
			lower_32_bits(pci_addr), PCIE_ATU_UNR_LOWER_TARGET);
		dw_pcie_writel_unroll(pp, index,
			upper_32_bits(pci_addr), PCIE_ATU_UNR_UPPER_TARGET);
		dw_pcie_writel_unroll(pp, index,
			type, PCIE_ATU_UNR_REGION_CTRL1);
		dw_pcie_writel_unroll(pp, index,
			PCIE_ATU_ENABLE, PCIE_ATU_UNR_REGION_CTRL2);
	} else {
		dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
						PCIE_ATU_VIEWPORT);
		dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr),
						PCIE_ATU_LOWER_BASE);
		dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr),
						PCIE_ATU_UPPER_BASE);
		dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
						PCIE_ATU_LIMIT);
		dw_pcie_writel_rc(pp, lower_32_bits(pci_addr),
						PCIE_ATU_LOWER_TARGET);
		dw_pcie_writel_rc(pp, upper_32_bits(pci_addr),
						PCIE_ATU_UPPER_TARGET);
		dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
		dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
	}
234 235 236 237 238

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
239
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
240 241 242 243 244 245
		if (pp->iatu_unroll_enabled)
			val = dw_pcie_readl_unroll(pp, index,
						   PCIE_ATU_UNR_REGION_CTRL2);
		else
			val = dw_pcie_readl_rc(pp, PCIE_ATU_CR2);

246 247 248 249 250 251
		if (val == PCIE_ATU_ENABLE)
			return;

		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
	}
	dev_err(pp->dev, "iATU is not being enabled\n");
252 253
}

J
Jingoo Han 已提交
254 255
static struct irq_chip dw_msi_irq_chip = {
	.name = "PCI-MSI",
256 257 258 259
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
J
Jingoo Han 已提交
260 261 262
};

/* MSI int handler */
263
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
J
Jingoo Han 已提交
264 265
{
	unsigned long val;
266
	int i, pos, irq;
267
	irqreturn_t ret = IRQ_NONE;
J
Jingoo Han 已提交
268 269 270 271 272

	for (i = 0; i < MAX_MSI_CTRLS; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
				(u32 *)&val);
		if (val) {
273
			ret = IRQ_HANDLED;
J
Jingoo Han 已提交
274 275
			pos = 0;
			while ((pos = find_next_bit(&val, 32, pos)) != 32) {
276 277
				irq = irq_find_mapping(pp->irq_domain,
						i * 32 + pos);
H
Harro Haan 已提交
278 279 280
				dw_pcie_wr_own_conf(pp,
						PCIE_MSI_INTR0_STATUS + i * 12,
						4, 1 << pos);
281
				generic_handle_irq(irq);
J
Jingoo Han 已提交
282 283 284 285
				pos++;
			}
		}
	}
286 287

	return ret;
J
Jingoo Han 已提交
288 289 290 291
}

void dw_pcie_msi_init(struct pcie_port *pp)
{
292 293
	u64 msi_target;

J
Jingoo Han 已提交
294
	pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
295
	msi_target = virt_to_phys((void *)pp->msi_data);
J
Jingoo Han 已提交
296 297 298

	/* program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
299 300 301
			    (u32)(msi_target & 0xffffffff));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    (u32)(msi_target >> 32 & 0xffffffff));
J
Jingoo Han 已提交
302 303
}

304 305 306 307 308 309 310 311 312 313 314
static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val &= ~(1 << bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

315
static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
316
			    unsigned int nvec, unsigned int pos)
317
{
318
	unsigned int i;
319

320
	for (i = 0; i < nvec; i++) {
321
		irq_set_msi_desc_off(irq_base, i, NULL);
322
		/* Disable corresponding interrupt on MSI controller */
323 324 325 326
		if (pp->ops->msi_clear_irq)
			pp->ops->msi_clear_irq(pp, pos + i);
		else
			dw_pcie_msi_clear_irq(pp, pos + i);
327
	}
328 329

	bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
330 331
}

332 333 334 335 336 337 338 339 340 341 342
static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val |= 1 << bit;
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

J
Jingoo Han 已提交
343 344
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
345
	int irq, pos0, i;
346
	struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(desc);
J
Jingoo Han 已提交
347

348 349 350 351
	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
				       order_base_2(no_irqs));
	if (pos0 < 0)
		goto no_valid_irq;
J
Jingoo Han 已提交
352

353 354
	irq = irq_find_mapping(pp->irq_domain, pos0);
	if (!irq)
J
Jingoo Han 已提交
355 356
		goto no_valid_irq;

357 358 359 360 361 362 363
	/*
	 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
	 * descs so there is no need to allocate descs here. We can therefore
	 * assume that if irq_find_mapping above returns non-zero, then the
	 * descs are also successfully allocated.
	 */

364
	for (i = 0; i < no_irqs; i++) {
365 366 367 368
		if (irq_set_msi_desc_off(irq, i, desc) != 0) {
			clear_irq_range(pp, irq, i, pos0);
			goto no_valid_irq;
		}
J
Jingoo Han 已提交
369
		/*Enable corresponding interrupt in MSI interrupt controller */
370 371 372 373
		if (pp->ops->msi_set_irq)
			pp->ops->msi_set_irq(pp, pos0 + i);
		else
			dw_pcie_msi_set_irq(pp, pos0 + i);
J
Jingoo Han 已提交
374 375 376
	}

	*pos = pos0;
377 378 379
	desc->nvec_used = no_irqs;
	desc->msi_attrib.multiple = order_base_2(no_irqs);

J
Jingoo Han 已提交
380 381 382 383 384 385 386
	return irq;

no_valid_irq:
	*pos = pos0;
	return -ENOSPC;
}

387
static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
J
Jingoo Han 已提交
388 389
{
	struct msi_msg msg;
390
	u64 msi_target;
J
Jingoo Han 已提交
391

392
	if (pp->ops->get_msi_addr)
393
		msi_target = pp->ops->get_msi_addr(pp);
394
	else
395 396 397 398
		msi_target = virt_to_phys((void *)pp->msi_data);

	msg.address_lo = (u32)(msi_target & 0xffffffff);
	msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);
399 400 401 402 403 404

	if (pp->ops->get_msi_data)
		msg.data = pp->ops->get_msi_data(pp, pos);
	else
		msg.data = pos;

405
	pci_write_msi_msg(irq, &msg);
406 407 408 409 410 411
}

static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			struct msi_desc *desc)
{
	int irq, pos;
412
	struct pcie_port *pp = pdev->bus->sysdata;
413 414 415 416 417 418 419 420 421

	if (desc->msi_attrib.is_msix)
		return -EINVAL;

	irq = assign_irq(1, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);
J
Jingoo Han 已提交
422 423 424 425

	return 0;
}

426 427 428 429 430 431
static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
			     int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
	int irq, pos;
	struct msi_desc *desc;
432
	struct pcie_port *pp = pdev->bus->sysdata;
433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	irq = assign_irq(nvec, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
#else
	return -EINVAL;
#endif
}

453
static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
J
Jingoo Han 已提交
454
{
455
	struct irq_data *data = irq_get_irq_data(irq);
456
	struct msi_desc *msi = irq_data_get_msi_desc(data);
457
	struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
458 459

	clear_irq_range(pp, irq, 1, data->hwirq);
J
Jingoo Han 已提交
460 461
}

462
static struct msi_controller dw_pcie_msi_chip = {
J
Jingoo Han 已提交
463
	.setup_irq = dw_msi_setup_irq,
464
	.setup_irqs = dw_msi_setup_irqs,
J
Jingoo Han 已提交
465 466 467
	.teardown_irq = dw_msi_teardown_irq,
};

468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485
int dw_pcie_wait_for_link(struct pcie_port *pp)
{
	int retries;

	/* check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (dw_pcie_link_up(pp)) {
			dev_info(pp->dev, "link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	dev_err(pp->dev, "phy link never came up\n");

	return -ETIMEDOUT;
}

486 487
int dw_pcie_link_up(struct pcie_port *pp)
{
488 489
	u32 val;

490 491
	if (pp->ops->link_up)
		return pp->ops->link_up(pp);
492

493 494
	val = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
	return val & PCIE_PHY_DEBUG_R1_LINK_UP;
495 496
}

J
Jingoo Han 已提交
497 498 499 500 501 502 503 504 505 506 507 508 509
static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

/* Linear MSI irq domain: every hwirq is wired up via dw_pcie_msi_map(). */
static const struct irq_domain_ops msi_domain_ops = {
	.map = dw_pcie_msi_map,
};

510 511 512 513 514 515 516 517 518 519 520
static u8 dw_pcie_iatu_unroll_enabled(struct pcie_port *pp)
{
	u32 val;

	val = dw_pcie_readl_rc(pp, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}

521
int dw_pcie_host_init(struct pcie_port *pp)
522 523
{
	struct device_node *np = pp->dev->of_node;
524
	struct platform_device *pdev = to_platform_device(pp->dev);
525
	struct pci_bus *bus, *child;
526
	struct resource *cfg_res;
527
	int i, ret;
528 529
	LIST_HEAD(res);
	struct resource_entry *win;
J
Jingoo Han 已提交
530

531 532
	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
533 534
		pp->cfg0_size = resource_size(cfg_res)/2;
		pp->cfg1_size = resource_size(cfg_res)/2;
535
		pp->cfg0_base = cfg_res->start;
536
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
537
	} else if (!pp->va_cfg0_base) {
538 539 540
		dev_err(pp->dev, "missing *config* reg space\n");
	}

541 542 543
	ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
	if (ret)
		return ret;
544

545 546 547 548
	ret = devm_request_pci_bus_resources(&pdev->dev, &res);
	if (ret)
		goto error;

549
	/* Get the I/O and memory ranges from DT */
550 551 552 553 554 555 556
	resource_list_for_each_entry(win, &res) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			pp->io = win->res;
			pp->io->name = "I/O";
			pp->io_size = resource_size(pp->io);
			pp->io_bus_addr = pp->io->start - win->offset;
557
			ret = pci_remap_iospace(pp->io, pp->io_base);
558
			if (ret)
559 560
				dev_warn(pp->dev, "error %d: failed to map resource %pR\n",
					 ret, pp->io);
561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg)/2;
			pp->cfg1_size = resource_size(pp->cfg)/2;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
578
		}
579 580
	}

581
	if (!pp->dbi_base) {
582 583
		pp->dbi_base = devm_ioremap(pp->dev, pp->cfg->start,
					resource_size(pp->cfg));
584 585
		if (!pp->dbi_base) {
			dev_err(pp->dev, "error with ioremap\n");
586 587
			ret = -ENOMEM;
			goto error;
588 589 590
		}
	}

591
	pp->mem_base = pp->mem->start;
592 593

	if (!pp->va_cfg0_base) {
594
		pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
595
						pp->cfg0_size);
596 597
		if (!pp->va_cfg0_base) {
			dev_err(pp->dev, "error with ioremap in function\n");
598 599
			ret = -ENOMEM;
			goto error;
600
		}
601
	}
602

603
	if (!pp->va_cfg1_base) {
604
		pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
605
						pp->cfg1_size);
606 607
		if (!pp->va_cfg1_base) {
			dev_err(pp->dev, "error with ioremap\n");
608 609
			ret = -ENOMEM;
			goto error;
610
		}
611 612
	}

613 614 615
	ret = of_property_read_u32(np, "num-lanes", &pp->lanes);
	if (ret)
		pp->lanes = 0;
616

J
Jingoo Han 已提交
617
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
618 619 620 621 622 623
		if (!pp->ops->msi_host_init) {
			pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
						MAX_MSI_IRQS, &msi_domain_ops,
						&dw_pcie_msi_chip);
			if (!pp->irq_domain) {
				dev_err(pp->dev, "irq domain init failed\n");
624 625
				ret = -ENXIO;
				goto error;
626
			}
J
Jingoo Han 已提交
627

628 629 630 631 632
			for (i = 0; i < MAX_MSI_IRQS; i++)
				irq_create_mapping(pp->irq_domain, i);
		} else {
			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
			if (ret < 0)
633
				goto error;
634
		}
J
Jingoo Han 已提交
635 636
	}

637 638
	pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);

639 640 641
	if (pp->ops->host_init)
		pp->ops->host_init(pp);

642 643 644 645 646 647 648 649 650
	pp->root_bus_nr = pp->busn->start;
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		bus = pci_scan_root_bus_msi(pp->dev, pp->root_bus_nr,
					    &dw_pcie_ops, pp, &res,
					    &dw_pcie_msi_chip);
		dw_pcie_msi_chip.dev = pp->dev;
	} else
		bus = pci_scan_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops,
					pp, &res);
651 652 653 654
	if (!bus) {
		ret = -ENOMEM;
		goto error;
	}
655 656 657 658 659 660 661

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

#ifdef CONFIG_ARM
	/* support old dtbs that incorrectly describe IRQs */
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
662 663
#endif

664 665
	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);
666

667 668
	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);
669

670
	pci_bus_add_devices(bus);
671
	return 0;
672 673 674 675

error:
	pci_free_resource_list(&res);
	return ret;
676 677 678
}

/*
 * Config read for devices below the root bus: retarget the shared CFG
 * outbound iATU region at the target BDF, perform the access, then hand
 * the region back to I/O traffic.
 */
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
		u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;

	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	/* CFG0 for the bus directly below the RC, CFG1 further downstream */
	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_cfg_read(va_cfg_base + where, size, val);
	/* restore the shared region for I/O space */
	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
				  PCIE_ATU_TYPE_IO, pp->io_base,
				  pp->io_bus_addr, pp->io_size);

	return ret;
}

715
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
716 717
		u32 devfn, int where, int size, u32 val)
{
718
	int ret, type;
719
	u32 busdev, cfg_size;
720 721
	u64 cpu_addr;
	void __iomem *va_cfg_base;
722

723 724 725
	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

726 727 728 729
	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
730
		type = PCIE_ATU_TYPE_CFG0;
731
		cpu_addr = pp->cfg0_base;
732 733
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
734
	} else {
735
		type = PCIE_ATU_TYPE_CFG1;
736
		cpu_addr = pp->cfg1_base;
737 738
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
739 740
	}

741 742 743
	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
				  type, cpu_addr,
				  busdev, cfg_size);
744
	ret = dw_pcie_cfg_write(va_cfg_base + where, size, val);
745
	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
746
				  PCIE_ATU_TYPE_IO, pp->io_base,
747 748
				  pp->io_bus_addr, pp->io_size);

749 750 751
	return ret;
}

752
static int dw_pcie_valid_config(struct pcie_port *pp,
753 754 755 756
				struct pci_bus *bus, int dev)
{
	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
757
		if (!dw_pcie_link_up(pp))
758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774
			return 0;
	}

	/* access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	/*
	 * do not read more than one device on the bus directly attached
	 * to RC's (Virtual Bridge's) DS side.
	 */
	if (bus->primary == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}

775
static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
776 777
			int size, u32 *val)
{
778
	struct pcie_port *pp = bus->sysdata;
779

780
	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
781 782 783 784
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

785 786
	if (bus->number == pp->root_bus_nr)
		return dw_pcie_rd_own_conf(pp, where, size, val);
787

788
	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
789 790
}

791
static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
792 793
			int where, int size, u32 val)
{
794
	struct pcie_port *pp = bus->sysdata;
795

796
	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
797 798
		return PCIBIOS_DEVICE_NOT_FOUND;

799 800
	if (bus->number == pp->root_bus_nr)
		return dw_pcie_wr_own_conf(pp, where, size, val);
801

802
	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
803 804
}

805 806 807
static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
808 809
};

810
void dw_pcie_setup_rc(struct pcie_port *pp)
811 812 813
{
	u32 val;

814
	/* set the number of lanes */
815
	val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
816
	val &= ~PORT_LINK_MODE_MASK;
817 818 819 820 821 822 823 824 825 826
	switch (pp->lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
827 828 829
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
830 831 832
	default:
		dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes);
		return;
833
	}
834
	dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);
835 836

	/* set link width speed control register */
837
	val = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
838
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
839 840 841 842 843 844 845 846 847 848
	switch (pp->lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
849 850 851
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
852
	}
853
	dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);
854 855

	/* setup RC BARs */
856
	dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
857
	dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);
858 859

	/* setup interrupt pins */
860
	val = dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE);
861 862
	val &= 0xffff00ff;
	val |= 0x00000100;
863
	dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);
864 865

	/* setup bus numbers */
866
	val = dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS);
867 868
	val &= 0xff000000;
	val |= 0x00010100;
869
	dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);
870 871

	/* setup command register */
872
	val = dw_pcie_readl_rc(pp, PCI_COMMAND);
873 874 875
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
876
	dw_pcie_writel_rc(pp, val, PCI_COMMAND);
877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf)
		dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
896 897 898
}

MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
899
MODULE_DESCRIPTION("Designware PCIe host controller driver");
900
MODULE_LICENSE("GPL v2");