/*
2
 * Synopsys Designware PCIe host controller driver
3 4 5 6 7 8 9 10 11 12 13
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
#include <linux/types.h>

#include "pcie-designware.h"

/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES		10
#define LINK_WAIT_USLEEP_MIN		90000
#define LINK_WAIT_USLEEP_MAX		100000

/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_MAX_IATU_RETRIES	5
#define LINK_WAIT_IATU_MIN		9000
#define LINK_WAIT_IATU_MAX		10000

/* Synopsys-specific PCIe configuration registers */
#define PCIE_PORT_LINK_CONTROL		0x710
#define PORT_LINK_MODE_MASK		(0x3f << 16)
#define PORT_LINK_MODE_1_LANES		(0x1 << 16)
#define PORT_LINK_MODE_2_LANES		(0x3 << 16)
#define PORT_LINK_MODE_4_LANES		(0x7 << 16)
#define PORT_LINK_MODE_8_LANES		(0xf << 16)

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)
#define PORT_LOGIC_LINK_WIDTH_MASK	(0x1f << 8)
#define PORT_LOGIC_LINK_WIDTH_1_LANES	(0x1 << 8)
#define PORT_LOGIC_LINK_WIDTH_2_LANES	(0x2 << 8)
#define PORT_LOGIC_LINK_WIDTH_4_LANES	(0x4 << 8)
#define PORT_LOGIC_LINK_WIDTH_8_LANES	(0x8 << 8)

/* MSI controller registers: one address pair plus per-controller
 * enable/mask/status banks spaced 12 bytes apart */
#define PCIE_MSI_ADDR_LO		0x820
#define PCIE_MSI_ADDR_HI		0x824
#define PCIE_MSI_INTR0_ENABLE		0x828
#define PCIE_MSI_INTR0_MASK		0x82C
#define PCIE_MSI_INTR0_STATUS		0x830

/* Viewport-style iATU registers (cores before 4.80) */
#define PCIE_ATU_VIEWPORT		0x900
#define PCIE_ATU_REGION_INBOUND		(0x1 << 31)
#define PCIE_ATU_REGION_OUTBOUND	(0x0 << 31)
#define PCIE_ATU_REGION_INDEX1		(0x1 << 0)
#define PCIE_ATU_REGION_INDEX0		(0x0 << 0)
#define PCIE_ATU_CR1			0x904
#define PCIE_ATU_TYPE_MEM		(0x0 << 0)
#define PCIE_ATU_TYPE_IO		(0x2 << 0)
#define PCIE_ATU_TYPE_CFG0		(0x4 << 0)
#define PCIE_ATU_TYPE_CFG1		(0x5 << 0)
#define PCIE_ATU_CR2			0x908
#define PCIE_ATU_ENABLE			(0x1 << 31)
#define PCIE_ATU_BAR_MODE_ENABLE	(0x1 << 30)
#define PCIE_ATU_LOWER_BASE		0x90C
#define PCIE_ATU_UPPER_BASE		0x910
#define PCIE_ATU_LIMIT			0x914
#define PCIE_ATU_LOWER_TARGET		0x918
#define PCIE_ATU_BUS(x)			(((x) & 0xff) << 24)
#define PCIE_ATU_DEV(x)			(((x) & 0x1f) << 19)
#define PCIE_ATU_FUNC(x)		(((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET		0x91C

/*
 * iATU Unroll-specific register definitions
 * From 4.80 core version the address translation will be made by unroll
 */
#define PCIE_ATU_UNR_REGION_CTRL1	0x00
#define PCIE_ATU_UNR_REGION_CTRL2	0x04
#define PCIE_ATU_UNR_LOWER_BASE		0x08
#define PCIE_ATU_UNR_UPPER_BASE		0x0C
#define PCIE_ATU_UNR_LIMIT		0x10
#define PCIE_ATU_UNR_LOWER_TARGET	0x14
#define PCIE_ATU_UNR_UPPER_TARGET	0x18

/* Register address builder: per-region register block in unroll space */
#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region)  ((0x3 << 20) | (region << 9))

/* PCIe Port Logic registers */
#define PLR_OFFSET			0x700
#define PCIE_PHY_DEBUG_R1		(PLR_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_LINK_UP	(0x1 << 4)
#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING	(0x1 << 29)

/* Forward declaration: defined near the bottom of this file, but needed
 * by dw_pcie_host_init() when scanning the root bus. */
static struct pci_ops dw_pcie_ops;

106
int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val)
107
{
108 109 110 111 112
	if ((uintptr_t)addr & (size - 1)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

113 114
	if (size == 4)
		*val = readl(addr);
115
	else if (size == 2)
116
		*val = readw(addr);
117
	else if (size == 1)
118
		*val = readb(addr);
119 120
	else {
		*val = 0;
121
		return PCIBIOS_BAD_REGISTER_NUMBER;
122
	}
123 124 125 126

	return PCIBIOS_SUCCESSFUL;
}

127
int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
128
{
129 130 131
	if ((uintptr_t)addr & (size - 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;

132 133 134
	if (size == 4)
		writel(val, addr);
	else if (size == 2)
135
		writew(val, addr);
136
	else if (size == 1)
137
		writeb(val, addr);
138 139 140 141 142 143
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}

144
static inline u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
145
{
146
	if (pp->ops->readl_rc)
147 148 149
		return pp->ops->readl_rc(pp, pp->dbi_base + reg);

	return readl(pp->dbi_base + reg);
150 151
}

152
static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
153
{
154
	if (pp->ops->writel_rc)
155
		pp->ops->writel_rc(pp, val, pp->dbi_base + reg);
156
	else
157
		writel(val, pp->dbi_base + reg);
158 159
}

160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180
static inline u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	if (pp->ops->readl_rc)
		return pp->ops->readl_rc(pp, pp->dbi_base + offset + reg);

	return readl(pp->dbi_base + offset + reg);
}

/* Write an unrolled-iATU register belonging to outbound region @index. */
static inline void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index,
					 u32 val, u32 reg)
{
	void __iomem *addr = pp->dbi_base +
			     PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index) + reg;

	if (pp->ops->writel_rc)
		pp->ops->writel_rc(pp, val, addr);
	else
		writel(val, addr);
}

181 182
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
183
{
184
	if (pp->ops->rd_own_conf)
185
		return pp->ops->rd_own_conf(pp, where, size, val);
186

187
	return dw_pcie_cfg_read(pp->dbi_base + where, size, val);
188 189
}

190 191
static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
192
{
193
	if (pp->ops->wr_own_conf)
194
		return pp->ops->wr_own_conf(pp, where, size, val);
195

196
	return dw_pcie_cfg_write(pp->dbi_base + where, size, val);
197 198
}

/*
 * Program outbound iATU region @index to translate the CPU window
 * [cpu_addr, cpu_addr + size - 1] to @pci_addr with TLP type @type,
 * then poll until the hardware reports the region enabled.  The write
 * order matters: the region/viewport is selected first and the enable
 * bit (CTRL2/CR2) is written last.
 */
static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
		int type, u64 cpu_addr, u64 pci_addr, u32 size)
{
	u32 retries, val;

	if (pp->iatu_unroll_enabled) {
		/* Cores >= 4.80: each region has its own register block */
		dw_pcie_writel_unroll(pp, index,
			lower_32_bits(cpu_addr), PCIE_ATU_UNR_LOWER_BASE);
		dw_pcie_writel_unroll(pp, index,
			upper_32_bits(cpu_addr), PCIE_ATU_UNR_UPPER_BASE);
		dw_pcie_writel_unroll(pp, index,
			lower_32_bits(cpu_addr + size - 1), PCIE_ATU_UNR_LIMIT);
		dw_pcie_writel_unroll(pp, index,
			lower_32_bits(pci_addr), PCIE_ATU_UNR_LOWER_TARGET);
		dw_pcie_writel_unroll(pp, index,
			upper_32_bits(pci_addr), PCIE_ATU_UNR_UPPER_TARGET);
		dw_pcie_writel_unroll(pp, index,
			type, PCIE_ATU_UNR_REGION_CTRL1);
		dw_pcie_writel_unroll(pp, index,
			PCIE_ATU_ENABLE, PCIE_ATU_UNR_REGION_CTRL2);
	} else {
		/* Older cores: select the region through the shared viewport */
		dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
						PCIE_ATU_VIEWPORT);
		dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr),
						PCIE_ATU_LOWER_BASE);
		dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr),
						PCIE_ATU_UPPER_BASE);
		dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
						PCIE_ATU_LIMIT);
		dw_pcie_writel_rc(pp, lower_32_bits(pci_addr),
						PCIE_ATU_LOWER_TARGET);
		dw_pcie_writel_rc(pp, upper_32_bits(pci_addr),
						PCIE_ATU_UPPER_TARGET);
		dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
		dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
	}

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		if (pp->iatu_unroll_enabled)
			val = dw_pcie_readl_unroll(pp, index,
						   PCIE_ATU_UNR_REGION_CTRL2);
		else
			val = dw_pcie_readl_rc(pp, PCIE_ATU_CR2);

		if (val == PCIE_ATU_ENABLE)
			return;

		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
	}
	dev_err(pp->dev, "iATU is not being enabled\n");
}

/* irq_chip for MSI vectors; mask/unmask are delegated to the PCI MSI core */
static struct irq_chip dw_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

/* MSI int handler */
264
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
J
Jingoo Han 已提交
265 266
{
	unsigned long val;
267
	int i, pos, irq;
268
	irqreturn_t ret = IRQ_NONE;
J
Jingoo Han 已提交
269 270 271 272 273

	for (i = 0; i < MAX_MSI_CTRLS; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
				(u32 *)&val);
		if (val) {
274
			ret = IRQ_HANDLED;
J
Jingoo Han 已提交
275 276
			pos = 0;
			while ((pos = find_next_bit(&val, 32, pos)) != 32) {
277 278
				irq = irq_find_mapping(pp->irq_domain,
						i * 32 + pos);
H
Harro Haan 已提交
279 280 281
				dw_pcie_wr_own_conf(pp,
						PCIE_MSI_INTR0_STATUS + i * 12,
						4, 1 << pos);
282
				generic_handle_irq(irq);
J
Jingoo Han 已提交
283 284 285 286
				pos++;
			}
		}
	}
287 288

	return ret;
J
Jingoo Han 已提交
289 290 291 292
}

void dw_pcie_msi_init(struct pcie_port *pp)
{
293 294
	u64 msi_target;

J
Jingoo Han 已提交
295
	pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
296
	msi_target = virt_to_phys((void *)pp->msi_data);
J
Jingoo Han 已提交
297 298 299

	/* program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
300 301 302
			    (u32)(msi_target & 0xffffffff));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    (u32)(msi_target >> 32 & 0xffffffff));
J
Jingoo Han 已提交
303 304
}

305 306 307 308 309 310 311 312 313 314 315
static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val &= ~(1 << bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

316
static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
317
			    unsigned int nvec, unsigned int pos)
318
{
319
	unsigned int i;
320

321
	for (i = 0; i < nvec; i++) {
322
		irq_set_msi_desc_off(irq_base, i, NULL);
323
		/* Disable corresponding interrupt on MSI controller */
324 325 326 327
		if (pp->ops->msi_clear_irq)
			pp->ops->msi_clear_irq(pp, pos + i);
		else
			dw_pcie_msi_clear_irq(pp, pos + i);
328
	}
329 330

	bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
331 332
}

333 334 335 336 337 338 339 340 341 342 343
static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val |= 1 << bit;
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

/*
 * Allocate a power-of-two-aligned block of @no_irqs MSI vectors for @desc.
 * On success, returns the Linux IRQ of the first vector and stores the
 * first hardware vector index in *pos; returns -ENOSPC on failure.
 */
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
	int irq, pos0, i;
	struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(desc);

	/* Multi-MSI blocks must be power-of-two sized and aligned */
	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
				       order_base_2(no_irqs));
	if (pos0 < 0)
		goto no_valid_irq;

	irq = irq_find_mapping(pp->irq_domain, pos0);
	if (!irq)
		goto no_valid_irq;

	/*
	 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
	 * descs so there is no need to allocate descs here. We can therefore
	 * assume that if irq_find_mapping above returns non-zero, then the
	 * descs are also successfully allocated.
	 */

	for (i = 0; i < no_irqs; i++) {
		if (irq_set_msi_desc_off(irq, i, desc) != 0) {
			/* Roll back the vectors configured so far */
			clear_irq_range(pp, irq, i, pos0);
			goto no_valid_irq;
		}
		/*Enable corresponding interrupt in MSI interrupt controller */
		if (pp->ops->msi_set_irq)
			pp->ops->msi_set_irq(pp, pos0 + i);
		else
			dw_pcie_msi_set_irq(pp, pos0 + i);
	}

	*pos = pos0;
	/* Record the block size on the descriptor (log2 for MSI 'multiple') */
	desc->nvec_used = no_irqs;
	desc->msi_attrib.multiple = order_base_2(no_irqs);

	return irq;

no_valid_irq:
	*pos = pos0;
	return -ENOSPC;
}

388
static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
J
Jingoo Han 已提交
389 390
{
	struct msi_msg msg;
391
	u64 msi_target;
J
Jingoo Han 已提交
392

393
	if (pp->ops->get_msi_addr)
394
		msi_target = pp->ops->get_msi_addr(pp);
395
	else
396 397 398 399
		msi_target = virt_to_phys((void *)pp->msi_data);

	msg.address_lo = (u32)(msi_target & 0xffffffff);
	msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);
400 401 402 403 404 405

	if (pp->ops->get_msi_data)
		msg.data = pp->ops->get_msi_data(pp, pos);
	else
		msg.data = pos;

406
	pci_write_msi_msg(irq, &msg);
407 408 409 410 411 412
}

static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			struct msi_desc *desc)
{
	int irq, pos;
413
	struct pcie_port *pp = pdev->bus->sysdata;
414 415 416 417 418 419 420 421 422

	if (desc->msi_attrib.is_msix)
		return -EINVAL;

	irq = assign_irq(1, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);
J
Jingoo Han 已提交
423 424 425 426

	return 0;
}

/*
 * msi_controller ->setup_irqs: allocate a block of @nvec MSI vectors for
 * @pdev (multi-message MSI).  MSI-X is rejected, and the whole operation
 * is unavailable when the kernel lacks MSI support.
 */
static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
			     int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
	int irq, pos;
	struct msi_desc *desc;
	struct pcie_port *pp = pdev->bus->sysdata;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	/* Multi-message MSI uses one descriptor for the whole block */
	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	irq = assign_irq(nvec, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
#else
	return -EINVAL;
#endif
}

/* msi_controller ->teardown_irq: release a single MSI vector; data->hwirq
 * is the hardware vector index assigned in assign_irq(). */
static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct msi_desc *msi = irq_data_get_msi_desc(data);
	struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);

	clear_irq_range(pp, irq, 1, data->hwirq);
}

/* MSI controller handed to the PCI core when CONFIG_PCI_MSI is enabled */
static struct msi_controller dw_pcie_msi_chip = {
	.setup_irq = dw_msi_setup_irq,
	.setup_irqs = dw_msi_setup_irqs,
	.teardown_irq = dw_msi_teardown_irq,
};

469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486
int dw_pcie_wait_for_link(struct pcie_port *pp)
{
	int retries;

	/* check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (dw_pcie_link_up(pp)) {
			dev_info(pp->dev, "link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	dev_err(pp->dev, "phy link never came up\n");

	return -ETIMEDOUT;
}

487 488
int dw_pcie_link_up(struct pcie_port *pp)
{
489 490
	u32 val;

491 492
	if (pp->ops->link_up)
		return pp->ops->link_up(pp);
493

494
	val = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
495 496
	return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) &&
		(!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)));
497 498
}

/* irq_domain ->map: wire each MSI hwirq to the dw_msi_irq_chip handler */
static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = dw_pcie_msi_map,
};

512 513 514 515 516 517 518 519 520 521 522
static u8 dw_pcie_iatu_unroll_enabled(struct pcie_port *pp)
{
	u32 val;

	val = dw_pcie_readl_rc(pp, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}

/*
 * Bring up the host bridge: gather config/IO/MEM/bus resources from the
 * platform device and DT, map the DBI and config windows, set up the MSI
 * domain, run the platform's host_init hook, then scan and enable the
 * root bus.  Returns 0 on success or a negative errno.
 */
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct device_node *np = pp->dev->of_node;
	struct platform_device *pdev = to_platform_device(pp->dev);
	struct pci_bus *bus, *child;
	struct resource *cfg_res;
	int i, ret;
	LIST_HEAD(res);
	struct resource_entry *win;

	/* Prefer an explicit "config" reg entry; split it into CFG0/CFG1 */
	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res)/2;
		pp->cfg1_size = resource_size(cfg_res)/2;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		/*
		 * NOTE(review): no error return here — the config window may
		 * still be supplied by a type-0 DT range below; confirm that
		 * is why this only logs.
		 */
		dev_err(pp->dev, "missing *config* reg space\n");
	}

	ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
	if (ret)
		return ret;

	ret = devm_request_pci_bus_resources(&pdev->dev, &res);
	if (ret)
		goto error;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry(win, &res) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			pp->io = win->res;
			pp->io->name = "I/O";
			pp->io_size = resource_size(pp->io);
			pp->io_bus_addr = pp->io->start - win->offset;
			ret = pci_remap_iospace(pp->io, pp->io_base);
			if (ret)
				/* non-fatal: warn and carry on without I/O */
				dev_warn(pp->dev, "error %d: failed to map resource %pR\n",
					 ret, pp->io);
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			/* untyped range = configuration space window */
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg)/2;
			pp->cfg1_size = resource_size(pp->cfg)/2;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		}
	}

	/* Platforms may pre-map dbi_base; otherwise reuse the cfg window */
	if (!pp->dbi_base) {
		pp->dbi_base = devm_ioremap(pp->dev, pp->cfg->start,
					resource_size(pp->cfg));
		if (!pp->dbi_base) {
			dev_err(pp->dev, "error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
						pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(pp->dev, "error with ioremap in function\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
						pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(pp->dev, "error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	/* num-lanes is optional; 0 means "leave hardware default" */
	ret = of_property_read_u32(np, "num-lanes", &pp->lanes);
	if (ret)
		pp->lanes = 0;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (!pp->ops->msi_host_init) {
			/* Default MSI setup: one linear domain, all vectors
			 * pre-mapped so assign_irq() never allocates descs */
			pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
						MAX_MSI_IRQS, &msi_domain_ops,
						&dw_pcie_msi_chip);
			if (!pp->irq_domain) {
				dev_err(pp->dev, "irq domain init failed\n");
				ret = -ENXIO;
				goto error;
			}

			for (i = 0; i < MAX_MSI_IRQS; i++)
				irq_create_mapping(pp->irq_domain, i);
		} else {
			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
			if (ret < 0)
				goto error;
		}
	}

	pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);

	if (pp->ops->host_init)
		pp->ops->host_init(pp);

	pp->root_bus_nr = pp->busn->start;
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		bus = pci_scan_root_bus_msi(pp->dev, pp->root_bus_nr,
					    &dw_pcie_ops, pp, &res,
					    &dw_pcie_msi_chip);
		dw_pcie_msi_chip.dev = pp->dev;
	} else
		bus = pci_scan_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops,
					pp, &res);
	if (!bus) {
		ret = -ENOMEM;
		goto error;
	}

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

#ifdef CONFIG_ARM
	/* support old dtbs that incorrectly describe IRQs */
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
#endif

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);
	return 0;

error:
	pci_free_resource_list(&res);
	return ret;
}

/*
 * Read config space of a device downstream of the root port.  The shared
 * outbound iATU region 0 is retargeted at the addressed device for the
 * duration of the access and then restored to I/O translation.
 */
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
		u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;

	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	/* Type 0 config on the bus directly below the root port, else type 1 */
	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_cfg_read(va_cfg_base + where, size, val);
	/* Restore region 0 to I/O translation after the access */
	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
				  PCIE_ATU_TYPE_IO, pp->io_base,
				  pp->io_bus_addr, pp->io_size);

	return ret;
}

/*
 * Write config space of a device downstream of the root port.  Mirrors
 * dw_pcie_rd_other_conf(): retarget shared iATU region 0 at the device,
 * perform the write, then restore the region to I/O translation.
 */
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
		u32 devfn, int where, int size, u32 val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;

	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	/* Type 0 config on the bus directly below the root port, else type 1 */
	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_cfg_write(va_cfg_base + where, size, val);
	/* Restore region 0 to I/O translation after the access */
	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
				  PCIE_ATU_TYPE_IO, pp->io_base,
				  pp->io_bus_addr, pp->io_size);

	return ret;
}

754
static int dw_pcie_valid_config(struct pcie_port *pp,
755 756 757 758
				struct pci_bus *bus, int dev)
{
	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
759
		if (!dw_pcie_link_up(pp))
760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776
			return 0;
	}

	/* access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	/*
	 * do not read more than one device on the bus directly attached
	 * to RC's (Virtual Bridge's) DS side.
	 */
	if (bus->primary == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}

777
static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
778 779
			int size, u32 *val)
{
780
	struct pcie_port *pp = bus->sysdata;
781

782
	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
783 784 785 786
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

787 788
	if (bus->number == pp->root_bus_nr)
		return dw_pcie_rd_own_conf(pp, where, size, val);
789

790
	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
791 792
}

793
static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
794 795
			int where, int size, u32 val)
{
796
	struct pcie_port *pp = bus->sysdata;
797

798
	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
799 800
		return PCIBIOS_DEVICE_NOT_FOUND;

801 802
	if (bus->number == pp->root_bus_nr)
		return dw_pcie_wr_own_conf(pp, where, size, val);
803

804
	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
805 806
}

/* Config accessors handed to the PCI core at bus scan time */
static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};

/*
 * Program the root complex: lane count, link-width control, RC BARs,
 * interrupt pin, bus numbers, command register, the default outbound
 * MEM iATU region, the bridge class code, and finally request a link
 * speed change.
 */
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val;

	/* set the number of lanes */
	val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_MODE_MASK;
	switch (pp->lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes);
		return;
	}
	dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);

	/* set link width speed control register */
	val = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	/* no default case needed: invalid lane counts returned above */
	switch (pp->lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);

	/* setup RC BARs */
	dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
	dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);

	/* setup interrupt pins: interrupt pin = INTA */
	val = dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);

	/* setup bus numbers: primary 0, secondary 1, subordinate 1 */
	val = dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00010100;
	dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);

	/* setup command register */
	val = dw_pcie_readl_rc(pp, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_rc(pp, val, PCI_COMMAND);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf)
		dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);

	/* request a link speed change to the highest supported rate */
	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}

MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
MODULE_DESCRIPTION("Designware PCIe host controller driver");
MODULE_LICENSE("GPL v2");