/* pci_fire.c: Sun4u platform PCI-E controller support.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/of_device.h>

#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/upa.h>

#include "pci_impl.h"

#define DRIVER_NAME	"fire"
#define PFX		DRIVER_NAME ": "

#define FIRE_IOMMU_CONTROL	0x40000UL
#define FIRE_IOMMU_TSBBASE	0x40008UL
#define FIRE_IOMMU_FLUSH	0x40100UL
#define FIRE_IOMMU_FLUSHINV	0x40108UL

static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	u32 vdma[2], dma_mask;
	u64 control;
	int tsbsize, err;

	/* No virtual-dma property on these guys, use largest size.  */
	vdma[0] = 0xc0000000; /* base */
	vdma[1] = 0x40000000; /* size */
	dma_mask = 0xffffffff;
	tsbsize = 128;
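	/* 1MB of TSB (128K eight-byte IOPTEs) maps the whole 1GB
	 * window with 8K pages; iommu_table_init() below takes the
	 * table size in bytes.
	 */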

	/* Register addresses. */
	iommu->iommu_control  = pbm->pbm_regs + FIRE_IOMMU_CONTROL;
	iommu->iommu_tsbbase  = pbm->pbm_regs + FIRE_IOMMU_TSBBASE;
	iommu->iommu_flush    = pbm->pbm_regs + FIRE_IOMMU_FLUSH;
	iommu->iommu_flushinv = pbm->pbm_regs + FIRE_IOMMU_FLUSHINV;

	/* We use the main control/status register of FIRE as the write
	 * completion register.
	 */
	iommu->write_complete_reg = pbm->controller_regs + 0x410000UL;

	/*
	 * Invalidate TLB Entries.
	 */
	upa_writeq(~(u64)0, iommu->iommu_flushinv);

	err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask,
			       pbm->numa_node);
	if (err)
		return err;

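	/* The low bits of the TSB base register hold the table size
	 * field; 0x7 presumably selects the 1MB table allocated
	 * above.
	 */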
	upa_writeq(__pa(iommu->page_table) | 0x7UL, iommu->iommu_tsbbase);

	control = upa_readq(iommu->iommu_control);
	control |= (0x00000400 /* TSB cache snoop enable */	|
		    0x00000300 /* Cache mode */			|
		    0x00000002 /* Bypass enable */		|
		    0x00000001 /* Translation enable */);
	upa_writeq(control, iommu->iommu_control);

	return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_msiq_entry {
	u64		word0;
#define MSIQ_WORD0_RESV			0x8000000000000000UL
#define MSIQ_WORD0_FMT_TYPE		0x7f00000000000000UL
#define MSIQ_WORD0_FMT_TYPE_SHIFT	56
#define MSIQ_WORD0_LEN			0x00ffc00000000000UL
#define MSIQ_WORD0_LEN_SHIFT		46
#define MSIQ_WORD0_ADDR0		0x00003fff00000000UL
#define MSIQ_WORD0_ADDR0_SHIFT		32
#define MSIQ_WORD0_RID			0x00000000ffff0000UL
#define MSIQ_WORD0_RID_SHIFT		16
#define MSIQ_WORD0_DATA0		0x000000000000ffffUL
#define MSIQ_WORD0_DATA0_SHIFT		0

#define MSIQ_TYPE_MSG			0x6
#define MSIQ_TYPE_MSI32			0xb
#define MSIQ_TYPE_MSI64			0xf

	u64		word1;
#define MSIQ_WORD1_ADDR1		0xffffffffffff0000UL
#define MSIQ_WORD1_ADDR1_SHIFT		16
#define MSIQ_WORD1_DATA1		0x000000000000ffffUL
#define MSIQ_WORD1_DATA1_SHIFT		0

	u64		resv[6];
};

/* All MSI registers are offset from pbm->pbm_regs */
#define EVENT_QUEUE_BASE_ADDR_REG	0x010000UL
#define  EVENT_QUEUE_BASE_ADDR_ALL_ONES	0xfffc000000000000UL

#define EVENT_QUEUE_CONTROL_SET(EQ)	(0x011000UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_CONTROL_SET_OFLOW	0x0200000000000000UL
#define  EVENT_QUEUE_CONTROL_SET_EN	0x0000100000000000UL

#define EVENT_QUEUE_CONTROL_CLEAR(EQ)	(0x011200UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_CONTROL_CLEAR_OF	0x0200000000000000UL
#define  EVENT_QUEUE_CONTROL_CLEAR_E2I	0x0000800000000000UL
#define  EVENT_QUEUE_CONTROL_CLEAR_DIS	0x0000100000000000UL

#define EVENT_QUEUE_STATE(EQ)		(0x011400UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_STATE_MASK		0x0000000000000007UL
#define  EVENT_QUEUE_STATE_IDLE		0x0000000000000001UL
#define  EVENT_QUEUE_STATE_ACTIVE	0x0000000000000002UL
#define  EVENT_QUEUE_STATE_ERROR	0x0000000000000004UL

#define EVENT_QUEUE_TAIL(EQ)		(0x011600UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_TAIL_OFLOW		0x0200000000000000UL
#define  EVENT_QUEUE_TAIL_VAL		0x000000000000007fUL

#define EVENT_QUEUE_HEAD(EQ)		(0x011800UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_HEAD_VAL		0x000000000000007fUL

#define MSI_MAP(MSI)			(0x020000UL + (MSI) * 0x8UL)
#define  MSI_MAP_VALID			0x8000000000000000UL
#define  MSI_MAP_EQWR_N			0x4000000000000000UL
#define  MSI_MAP_EQNUM			0x000000000000003fUL

#define MSI_CLEAR(MSI)			(0x028000UL + (MSI) * 0x8UL)
#define  MSI_CLEAR_EQWR_N		0x4000000000000000UL

#define IMONDO_DATA0			0x02C000UL
#define  IMONDO_DATA0_DATA		0xffffffffffffffc0UL

#define IMONDO_DATA1			0x02C008UL
#define  IMONDO_DATA1_DATA		0xffffffffffffffffUL

#define MSI_32BIT_ADDR			0x034000UL
#define  MSI_32BIT_ADDR_VAL		0x00000000ffff0000UL

#define MSI_64BIT_ADDR			0x034008UL
#define  MSI_64BIT_ADDR_VAL		0xffffffffffff0000UL

static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			     unsigned long *head)
{
	*head = upa_readq(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
	return 0;
}

static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid,
				unsigned long *head, unsigned long *msi)
{
	unsigned long type_fmt, type, msi_num;
	struct pci_msiq_entry *base, *ep;

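	/* Each event queue is 8K: 128 entries of 64 bytes, matching
	 * the 7-bit head/tail values above.  msi_queues is a void
	 * pointer, so this arithmetic is in bytes.
	 */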
	base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
	ep = &base[*head];

	if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0)
		return 0;

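	/* The upper bits of the FMT/TYPE field are the record type;
	 * the low three bits apparently encode the TLP format, which
	 * we do not care about here.
	 */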
	type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
		    MSIQ_WORD0_FMT_TYPE_SHIFT);
	type = (type_fmt >> 3);
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
			  MSIQ_WORD0_DATA0_SHIFT);

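	/* ACK the MSI so the device may raise it again. */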
	upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi_num));

	/* Clear the entry.  */
	ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;

	/* Go to next entry in ring.  */
	(*head)++;
	if (*head >= pbm->msiq_ent_count)
		*head = 0;

	return 1;
}

static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			     unsigned long head)
{
	upa_writeq(head, pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
	return 0;
}

static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long msi, int is_msi64)
{
	u64 val;

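	/* Bind the MSI to our event queue, clear any stale state,
	 * then mark the mapping valid.
	 */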
	val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
	val &= ~(MSI_MAP_EQNUM);
	val |= msiqid;
	upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));

	upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi));

	val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
	val |= MSI_MAP_VALID;
	upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));

	return 0;
}

static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long msiqid;
	u64 val;

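	/* Read-modify-write so that only the valid bit changes. */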
	val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
	msiqid = (val & MSI_MAP_EQNUM);

	val &= ~MSI_MAP_VALID;

	upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));

	return 0;
}

static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm)
{
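	/* Presumably sized for the worst case: MSI_MAP_EQNUM is six
	 * bits, i.e. at most 64 event queues of 8K (128 64-byte
	 * entries) each, which is exactly 512K.
	 */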
	unsigned long pages, order, i;

	order = get_order(512 * 1024);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

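	/* Hand the queue area's physical address to the hardware;
	 * the high ALL_ONES bits appear to be required by the
	 * register format.
	 */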
	upa_writeq((EVENT_QUEUE_BASE_ADDR_ALL_ONES |
		    __pa(pbm->msi_queues)),
		   pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG);

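	/* The interrupt mondo data carries our port id in the upper
	 * bits; the low six bits are presumably filled in with the
	 * queue's devino.
	 */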
	upa_writeq(pbm->portid << 6, pbm->pbm_regs + IMONDO_DATA0);
	upa_writeq(0, pbm->pbm_regs + IMONDO_DATA1);

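	/* Addresses that devices target for 32-bit and 64-bit MSI
	 * writes.
	 */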
	upa_writeq(pbm->msi32_start, pbm->pbm_regs + MSI_32BIT_ADDR);
	upa_writeq(pbm->msi64_start, pbm->pbm_regs + MSI_64BIT_ADDR);

	for (i = 0; i < pbm->msiq_num; i++) {
		upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_HEAD(i));
		upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_TAIL(i));
	}

	return 0;
}

static void pci_fire_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long pages, order;

	order = get_order(512 * 1024);
	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
				   unsigned long msiqid,
				   unsigned long devino)
{
	unsigned long cregs = (unsigned long) pbm->pbm_regs;
	unsigned long imap_reg, iclr_reg, int_ctrlr;
	unsigned int virt_irq;
	int fixup;
	u64 val;

	imap_reg = cregs + (0x001000UL + (devino * 0x08UL));
	iclr_reg = cregs + (0x001400UL + (devino * 0x08UL));

	/* XXX iterate amongst the 4 IRQ controllers XXX */
	int_ctrlr = (1UL << 6);

	val = upa_readq(imap_reg);
	val |= (1UL << 63) | int_ctrlr;
	upa_writeq(val, imap_reg);

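	/* build_irq() adds this fixup to the INO it reads back from
	 * the IMAP, which already contains the controller number, so
	 * the final INO comes out as (portid << 6) | devino.
	 */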
	fixup = ((pbm->portid << 6) | devino) - int_ctrlr;

	virt_irq = build_irq(fixup, iclr_reg, imap_reg);
	if (!virt_irq)
		return -ENOMEM;

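	/* Finally, turn the event queue on. */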
	upa_writeq(EVENT_QUEUE_CONTROL_SET_EN,
		   pbm->pbm_regs + EVENT_QUEUE_CONTROL_SET(msiqid));

	return virt_irq;
}

static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
	.get_head	=	pci_fire_get_head,
	.dequeue_msi	=	pci_fire_dequeue_msi,
	.set_head	=	pci_fire_set_head,
	.msi_setup	=	pci_fire_msi_setup,
	.msi_teardown	=	pci_fire_msi_teardown,
	.msiq_alloc	=	pci_fire_msiq_alloc,
	.msiq_free	=	pci_fire_msiq_free,
	.msiq_build_irq	=	pci_fire_msiq_build_irq,
};

static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

/* Based at pbm->controller_regs */
#define FIRE_PARITY_CONTROL	0x470010UL
#define  FIRE_PARITY_ENAB	0x8000000000000000UL
#define FIRE_FATAL_RESET_CTL	0x471028UL
#define  FIRE_FATAL_RESET_SPARE	0x0000000004000000UL
#define  FIRE_FATAL_RESET_MB	0x0000000002000000UL
#define  FIRE_FATAL_RESET_CPE	0x0000000000008000UL
#define  FIRE_FATAL_RESET_APE	0x0000000000004000UL
#define  FIRE_FATAL_RESET_PIO	0x0000000000000040UL
#define  FIRE_FATAL_RESET_JW	0x0000000000000004UL
#define  FIRE_FATAL_RESET_JI	0x0000000000000002UL
#define  FIRE_FATAL_RESET_JR	0x0000000000000001UL
#define FIRE_CORE_INTR_ENABLE	0x471800UL

/* Based at pbm->pbm_regs */
#define FIRE_TLU_CTRL		0x80000UL
#define  FIRE_TLU_CTRL_TIM	0x00000000da000000UL
#define  FIRE_TLU_CTRL_QDET	0x0000000000000100UL
#define  FIRE_TLU_CTRL_CFG	0x0000000000000001UL
#define FIRE_TLU_DEV_CTRL	0x90008UL
#define FIRE_TLU_LINK_CTRL	0x90020UL
#define FIRE_TLU_LINK_CTRL_CLK	0x0000000000000040UL
#define FIRE_LPU_RESET		0xe2008UL
#define FIRE_LPU_LLCFG		0xe2200UL
#define  FIRE_LPU_LLCFG_VC0	0x0000000000000100UL
#define FIRE_LPU_FCTRL_UCTRL	0xe2240UL
#define  FIRE_LPU_FCTRL_UCTRL_N	0x0000000000000002UL
#define  FIRE_LPU_FCTRL_UCTRL_P	0x0000000000000001UL
#define FIRE_LPU_TXL_FIFOP	0xe2430UL
#define FIRE_LPU_LTSSM_CFG2	0xe2788UL
#define FIRE_LPU_LTSSM_CFG3	0xe2790UL
#define FIRE_LPU_LTSSM_CFG4	0xe2798UL
#define FIRE_LPU_LTSSM_CFG5	0xe27a0UL
#define FIRE_DMC_IENAB		0x31800UL
#define FIRE_DMC_DBG_SEL_A	0x53000UL
#define FIRE_DMC_DBG_SEL_B	0x53008UL
#define FIRE_PEC_IENAB		0x51800UL

static void pci_fire_hw_init(struct pci_pbm_info *pbm)
{
	u64 val;

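	/* Enable parity checking. */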
	upa_writeq(FIRE_PARITY_ENAB,
		   pbm->controller_regs + FIRE_PARITY_CONTROL);

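	/* Select the error conditions that force a fatal reset. */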
	upa_writeq((FIRE_FATAL_RESET_SPARE |
		    FIRE_FATAL_RESET_MB |
		    FIRE_FATAL_RESET_CPE |
		    FIRE_FATAL_RESET_APE |
		    FIRE_FATAL_RESET_PIO |
		    FIRE_FATAL_RESET_JW |
		    FIRE_FATAL_RESET_JI |
		    FIRE_FATAL_RESET_JR),
		   pbm->controller_regs + FIRE_FATAL_RESET_CTL);

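	/* Unmask all core interrupt sources. */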
	upa_writeq(~(u64)0, pbm->controller_regs + FIRE_CORE_INTR_ENABLE);

	val = upa_readq(pbm->pbm_regs + FIRE_TLU_CTRL);
	val |= (FIRE_TLU_CTRL_TIM |
		FIRE_TLU_CTRL_QDET |
		FIRE_TLU_CTRL_CFG);
	upa_writeq(val, pbm->pbm_regs + FIRE_TLU_CTRL);
	upa_writeq(0, pbm->pbm_regs + FIRE_TLU_DEV_CTRL);
	upa_writeq(FIRE_TLU_LINK_CTRL_CLK,
		   pbm->pbm_regs + FIRE_TLU_LINK_CTRL);

	upa_writeq(0, pbm->pbm_regs + FIRE_LPU_RESET);
	upa_writeq(FIRE_LPU_LLCFG_VC0, pbm->pbm_regs + FIRE_LPU_LLCFG);
	upa_writeq((FIRE_LPU_FCTRL_UCTRL_N | FIRE_LPU_FCTRL_UCTRL_P),
		   pbm->pbm_regs + FIRE_LPU_FCTRL_UCTRL);
	upa_writeq(((0xffff << 16) | (0x0000 << 0)),
		   pbm->pbm_regs + FIRE_LPU_TXL_FIFOP);
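	/* Link training state machine (LTSSM) timing parameters. */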
	upa_writeq(3000000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG2);
	upa_writeq(500000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG3);
	upa_writeq((2 << 16) | (140 << 8),
		   pbm->pbm_regs + FIRE_LPU_LTSSM_CFG4);
	upa_writeq(0, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG5);

	upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_DMC_IENAB);
	upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_A);
	upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_B);

	upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_PEC_IENAB);
}

static int __init pci_fire_pbm_init(struct pci_pbm_info *pbm,
				    struct of_device *op, u32 portid)
{
	const struct linux_prom64_registers *regs;
	struct device_node *dp = op->node;
	int err;

	pbm->numa_node = -1;

	pbm->pci_ops = &sun4u_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->portid = portid;
	pbm->op = op;
	pbm->name = dp->full_name;

	regs = of_get_property(dp, "reg", NULL);
	pbm->pbm_regs = regs[0].phys_addr;
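	/* regs[1] apparently points 0x410000 into the controller
	 * space; bias it down so the FIRE_* controller-relative
	 * offsets (and the write-completion register at +0x410000)
	 * line up.
	 */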
	pbm->controller_regs = regs[1].phys_addr - 0x410000UL;

	printk("%s: SUN4U PCIE Bus Module\n", pbm->name);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	pci_fire_hw_init(pbm);

	err = pci_fire_pbm_iommu_init(pbm);
	if (err)
		return err;

	pci_fire_msi_init(pbm);

	pbm->pci_bus = pci_scan_one_pbm(pbm, &op->dev);

	/* XXX register error interrupt handlers XXX */

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

static int __init fire_probe(struct of_device *op,
				const struct of_device_id *match)
{
	struct device_node *dp = op->node;
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	u32 portid;
	int err;

	portid = of_getintprop_default(dp, "portid", 0xff);

	err = -ENOMEM;
	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Cannot allocate pci_pbminfo.\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	err = pci_fire_pbm_init(pbm, op, portid);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);
out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static struct of_device_id __initdata fire_match[] = {
	{
		.name = "pci",
		.compatible = "pciex108e,80f0",
	},
	{},
};

static struct of_platform_driver fire_driver = {
	.name		= DRIVER_NAME,
	.match_table	= fire_match,
	.probe		= fire_probe,
};

static int __init fire_init(void)
{
	return of_register_driver(&fire_driver, &of_bus_type);
}

subsys_initcall(fire_init);