/*
 * Derived from arch/powerpc/kernel/iommu.c
 *
 * Copyright IBM Corporation, 2006-2007
 * Copyright (C) 2006  Jon Mason <jdmason@kudzu.us>
 *
 * Author: Jon Mason <jdmason@kudzu.us>
 * Author: Muli Ben-Yehuda <muli@il.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/pci_ids.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <asm/iommu.h>
#include <asm/calgary.h>
#include <asm/tce.h>
#include <asm/pci-direct.h>
#include <asm/system.h>
#include <asm/dma.h>
#include <asm/rio.h>

#ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
int use_calgary __read_mostly = 1;
#else
int use_calgary __read_mostly = 0;
#endif /* CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT */

#define PCI_DEVICE_ID_IBM_CALGARY 0x02a1
#define PCI_DEVICE_ID_IBM_CALIOC2 0x0308

/* register offsets inside the host bridge space */
#define CALGARY_CONFIG_REG	0x0108
#define PHB_CSR_OFFSET		0x0110 /* Channel Status */
#define PHB_PLSSR_OFFSET	0x0120
#define PHB_CONFIG_RW_OFFSET	0x0160
#define PHB_IOBASE_BAR_LOW	0x0170
#define PHB_IOBASE_BAR_HIGH	0x0180
#define PHB_MEM_1_LOW		0x0190
#define PHB_MEM_1_HIGH		0x01A0
#define PHB_IO_ADDR_SIZE	0x01B0
#define PHB_MEM_1_SIZE		0x01C0
#define PHB_MEM_ST_OFFSET	0x01D0
#define PHB_AER_OFFSET		0x0200
#define PHB_CONFIG_0_HIGH	0x0220
#define PHB_CONFIG_0_LOW	0x0230
#define PHB_CONFIG_0_END	0x0240
#define PHB_MEM_2_LOW		0x02B0
#define PHB_MEM_2_HIGH		0x02C0
#define PHB_MEM_2_SIZE_HIGH	0x02D0
#define PHB_MEM_2_SIZE_LOW	0x02E0
#define PHB_DOSHOLE_OFFSET	0x08E0

/* CalIOC2 specific */
#define PHB_SAVIOR_L2		0x0DB0
#define PHB_PAGE_MIG_CTRL	0x0DA8
#define PHB_PAGE_MIG_DEBUG	0x0DA0
#define PHB_ROOT_COMPLEX_STATUS 0x0CB0

/* PHB_CONFIG_RW */
#define PHB_TCE_ENABLE		0x20000000
#define PHB_SLOT_DISABLE	0x1C000000
#define PHB_DAC_DISABLE		0x01000000
#define PHB_MEM2_ENABLE		0x00400000
#define PHB_MCSR_ENABLE		0x00100000
/* TAR (Table Address Register) */
#define TAR_SW_BITS		0x0000ffffffff800fUL
#define TAR_VALID		0x0000000000000008UL
/* CSR (Channel/DMA Status Register) */
#define CSR_AGENT_MASK		0xffe0ffff
/* CCR (Calgary Configuration Register) */
#define CCR_2SEC_TIMEOUT	0x000000000000000EUL
/* PMCR/PMDR (Page Migration Control/Debug Registers) */
#define PMR_SOFTSTOP		0x80000000
#define PMR_SOFTSTOPFAULT	0x40000000
#define PMR_HARDSTOP		0x20000000

#define MAX_NUM_OF_PHBS		8 /* how many PHBs in total? */
#define MAX_NUM_CHASSIS		8 /* max number of chassis */
/* MAX_PHB_BUS_NUM is the maximal possible dev->bus->number */
#define MAX_PHB_BUS_NUM		(MAX_NUM_OF_PHBS * MAX_NUM_CHASSIS * 2)
#define PHBS_PER_CALGARY	4

/* register offsets in Calgary's internal register space */
static const unsigned long tar_offsets[] = {
	0x0580 /* TAR0 */,
	0x0588 /* TAR1 */,
	0x0590 /* TAR2 */,
	0x0598 /* TAR3 */
};

static const unsigned long split_queue_offsets[] = {
	0x4870 /* SPLIT QUEUE 0 */,
	0x5870 /* SPLIT QUEUE 1 */,
	0x6870 /* SPLIT QUEUE 2 */,
	0x7870 /* SPLIT QUEUE 3 */
};

static const unsigned long phb_offsets[] = {
	0x8000 /* PHB0 */,
	0x9000 /* PHB1 */,
	0xA000 /* PHB2 */,
	0xB000 /* PHB3 */
};

/* PHB debug registers */

static const unsigned long phb_debug_offsets[] = {
	0x4000	/* PHB 0 DEBUG */,
	0x5000	/* PHB 1 DEBUG */,
	0x6000	/* PHB 2 DEBUG */,
	0x7000	/* PHB 3 DEBUG */
};

/*
 * STUFF register for each debug PHB,
 * byte 1 = start bus number, byte 2 = end bus number
 */

#define PHB_DEBUG_STUFF_OFFSET	0x0020

#define EMERGENCY_PAGES 32 /* = 128KB */

unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED;
static int translate_empty_slots __read_mostly = 0;
static int calgary_detected __read_mostly = 0;

static struct rio_table_hdr	*rio_table_hdr __initdata;
static struct scal_detail	*scal_devs[MAX_NUMNODES] __initdata;
static struct rio_detail	*rio_devs[MAX_NUMNODES * 4] __initdata;

struct calgary_bus_info {
	void *tce_space;
	unsigned char translation_disabled;
	signed char phbid;
	void __iomem *bbar;
};

static void calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
static void calgary_tce_cache_blast(struct iommu_table *tbl);
static void calgary_dump_error_regs(struct iommu_table *tbl);
static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
static void calioc2_tce_cache_blast(struct iommu_table *tbl);
static void calioc2_dump_error_regs(struct iommu_table *tbl);

static struct cal_chipset_ops calgary_chip_ops = {
	.handle_quirks = calgary_handle_quirks,
	.tce_cache_blast = calgary_tce_cache_blast,
	.dump_error_regs = calgary_dump_error_regs
};

static struct cal_chipset_ops calioc2_chip_ops = {
	.handle_quirks = calioc2_handle_quirks,
	.tce_cache_blast = calioc2_tce_cache_blast,
	.dump_error_regs = calioc2_dump_error_regs
};

static struct calgary_bus_info bus_info[MAX_PHB_BUS_NUM] = { { NULL, 0, 0 }, };

/* enable this to stress test the chip's TCE cache */
#ifdef CONFIG_IOMMU_DEBUG
int debugging __read_mostly = 1;

static inline unsigned long verify_bit_range(unsigned long* bitmap,
	int expected, unsigned long start, unsigned long end)
{
	unsigned long idx = start;

	BUG_ON(start >= end);

	while (idx < end) {
		if (!!test_bit(idx, bitmap) != expected)
			return idx;
		++idx;
	}

	/* all bits have the expected value */
	return ~0UL;
}
#else /* debugging is disabled */
int debugging __read_mostly = 0;

static inline unsigned long verify_bit_range(unsigned long* bitmap,
	int expected, unsigned long start, unsigned long end)
{
	return ~0UL;
}

#endif /* CONFIG_IOMMU_DEBUG */

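/*
 * Return how many 4K IOMMU pages the DMA region [dma, dma + dmalen)
 * touches, counting partial first and last pages: e.g. dma = 0x1ff0,
 * dmalen = 0x20 straddles a page boundary and yields 2.
 */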
static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen)
{
	unsigned int npages;

	npages = PAGE_ALIGN(dma + dmalen) - (dma & PAGE_MASK);
	npages >>= PAGE_SHIFT;

	return npages;
}

static inline int translate_phb(struct pci_dev* dev)
{
	int disabled = bus_info[dev->bus->number].translation_disabled;
	return !disabled;
}

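/*
 * Mark npages starting at start_addr as in use in the table's
 * allocation bitmap, so iommu_range_alloc() never hands out DMA
 * addresses that overlap the range.  Used for the emergency pages,
 * the legacy 640KB-1MB hole and the peripheral memory windows.
 */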
static void iommu_range_reserve(struct iommu_table *tbl,
	unsigned long start_addr, unsigned int npages)
{
	unsigned long index;
	unsigned long end;
	unsigned long badbit;
	unsigned long flags;

	index = start_addr >> PAGE_SHIFT;

	/* bail out if we're asked to reserve a region we don't cover */
	if (index >= tbl->it_size)
		return;

	end = index + npages;
	if (end > tbl->it_size) /* don't go off the table */
		end = tbl->it_size;

	spin_lock_irqsave(&tbl->it_lock, flags);

	badbit = verify_bit_range(tbl->it_map, 0, index, end);
	if (badbit != ~0UL) {
		if (printk_ratelimit())
			printk(KERN_ERR "Calgary: entry already allocated at "
			       "0x%lx tbl %p dma 0x%lx npages %u\n",
			       badbit, tbl, start_addr, npages);
	}

	set_bit_string(tbl->it_map, index, npages);

	spin_unlock_irqrestore(&tbl->it_lock, flags);
}

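/*
 * Allocate a run of npages free entries in the TCE table bitmap.
 * Allocation is next-fit from it_hint; if that fails, flush the
 * chip's TCE cache and retry once from the start of the table.
 * On overflow we either panic (panic_on_overflow) or hand back
 * bad_dma_address.
 */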
static unsigned long iommu_range_alloc(struct iommu_table *tbl,
	unsigned int npages)
{
	unsigned long flags;
	unsigned long offset;

	BUG_ON(npages == 0);

	spin_lock_irqsave(&tbl->it_lock, flags);

	offset = find_next_zero_string(tbl->it_map, tbl->it_hint,
				       tbl->it_size, npages);
	if (offset == ~0UL) {
		tbl->chip_ops->tce_cache_blast(tbl);
		offset = find_next_zero_string(tbl->it_map, 0,
					       tbl->it_size, npages);
		if (offset == ~0UL) {
			printk(KERN_WARNING "Calgary: IOMMU full.\n");
			spin_unlock_irqrestore(&tbl->it_lock, flags);
			if (panic_on_overflow)
				panic("Calgary: fix the allocator.\n");
			else
				return bad_dma_address;
		}
	}

	set_bit_string(tbl->it_map, offset, npages);
	tbl->it_hint = offset + npages;
	BUG_ON(tbl->it_hint > tbl->it_size);

	spin_unlock_irqrestore(&tbl->it_lock, flags);

	return offset;
}

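/*
 * Map npages of vaddr for DMA: allocate table entries, program the
 * TCEs and return the bus address (entry number << PAGE_SHIFT plus
 * the offset of vaddr within its page).
 */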
static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *vaddr,
	unsigned int npages, int direction)
{
	unsigned long entry;
	dma_addr_t ret = bad_dma_address;

	entry = iommu_range_alloc(tbl, npages);

	if (unlikely(entry == bad_dma_address))
		goto error;

	/* set the return dma address */
	ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK);

	/* put the TCEs in the HW table */
	tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
		  direction);

	return ret;

error:
	printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
	       "iommu %p\n", npages, tbl);
	return bad_dma_address;
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
	unsigned int npages)
{
	unsigned long entry;
	unsigned long badbit;
	unsigned long badend;
	unsigned long flags;

	/* were we called with bad_dma_address? */
	badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE);
	if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) {
		printk(KERN_ERR "Calgary: driver tried unmapping bad DMA "
		       "address 0x%Lx\n", dma_addr);
		WARN_ON(1);
		return;
	}

	entry = dma_addr >> PAGE_SHIFT;

	BUG_ON(entry + npages > tbl->it_size);

	tce_free(tbl, entry, npages);

	spin_lock_irqsave(&tbl->it_lock, flags);

	badbit = verify_bit_range(tbl->it_map, 1, entry, entry + npages);
	if (badbit != ~0UL) {
		if (printk_ratelimit())
			printk(KERN_ERR "Calgary: bit is off at 0x%lx "
			       "tbl %p dma 0x%Lx entry 0x%lx npages %u\n",
			       badbit, tbl, dma_addr, entry, npages);
	}

	__clear_bit_string(tbl->it_map, entry, npages);

	spin_unlock_irqrestore(&tbl->it_lock, flags);
}

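/*
 * Every device behind a Calgary PHB shares the PHB's IOMMU table,
 * which hangs off the root bus.  Walk up any PCI-to-PCI bridges to
 * find it; non-translated buses yield NULL.
 */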
static inline struct iommu_table *find_iommu_table(struct device *dev)
{
	struct pci_dev *pdev;
	struct pci_bus *pbus;
	struct iommu_table *tbl;

	pdev = to_pci_dev(dev);

	pbus = pdev->bus;

	/* is the device behind a bridge? Look for the root bus */
	while (pbus->parent)
		pbus = pbus->parent;

	tbl = pci_iommu(pbus);

	BUG_ON(tbl && (tbl->it_busno != pbus->number));

	return tbl;
}

static void calgary_unmap_sg(struct device *dev,
	struct scatterlist *sglist, int nelems, int direction)
{
	struct iommu_table *tbl = find_iommu_table(dev);

	if (!translate_phb(to_pci_dev(dev)))
		return;

	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma = sglist->dma_address;
		unsigned int dmalen = sglist->dma_length;

		if (dmalen == 0)
			break;

		npages = num_dma_pages(dma, dmalen);
		iommu_free(tbl, dma, npages);
		sglist++;
	}
}

static int calgary_nontranslate_map_sg(struct device* dev,
	struct scatterlist *sg, int nelems, int direction)
{
	int i;

	for (i = 0; i < nelems; i++ ) {
		struct scatterlist *s = &sg[i];
		BUG_ON(!s->page);
		s->dma_address = virt_to_bus(page_address(s->page) + s->offset);
		s->dma_length = s->length;
	}
	return nelems;
}

static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
	int nelems, int direction)
{
	struct iommu_table *tbl = find_iommu_table(dev);
	unsigned long vaddr;
	unsigned int npages;
	unsigned long entry;
	int i;

	if (!translate_phb(to_pci_dev(dev)))
		return calgary_nontranslate_map_sg(dev, sg, nelems, direction);

	for (i = 0; i < nelems; i++ ) {
		struct scatterlist *s = &sg[i];
		BUG_ON(!s->page);

		vaddr = (unsigned long)page_address(s->page) + s->offset;
		npages = num_dma_pages(vaddr, s->length);

		entry = iommu_range_alloc(tbl, npages);
		if (entry == bad_dma_address) {
			/* makes sure unmap knows to stop */
			s->dma_length = 0;
			goto error;
		}

		s->dma_address = (entry << PAGE_SHIFT) | s->offset;

		/* insert into HW table */
		tce_build(tbl, entry, npages, vaddr & PAGE_MASK,
			  direction);

		s->dma_length = s->length;
	}

	return nelems;
error:
	calgary_unmap_sg(dev, sg, nelems, direction);
	for (i = 0; i < nelems; i++) {
		sg[i].dma_address = bad_dma_address;
		sg[i].dma_length = 0;
	}
	return 0;
}

static dma_addr_t calgary_map_single(struct device *dev, void *vaddr,
	size_t size, int direction)
{
	dma_addr_t dma_handle = bad_dma_address;
	unsigned long uaddr;
	unsigned int npages;
	struct iommu_table *tbl = find_iommu_table(dev);

	uaddr = (unsigned long)vaddr;
	npages = num_dma_pages(uaddr, size);

	if (translate_phb(to_pci_dev(dev)))
		dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
	else
		dma_handle = virt_to_bus(vaddr);

	return dma_handle;
}

static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
	size_t size, int direction)
{
	struct iommu_table *tbl = find_iommu_table(dev);
	unsigned int npages;

	if (!translate_phb(to_pci_dev(dev)))
		return;

	npages = num_dma_pages(dma_handle, size);
	iommu_free(tbl, dma_handle, npages);
}

static void* calgary_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int npages, order;
	struct iommu_table *tbl = find_iommu_table(dev);

	size = PAGE_ALIGN(size); /* size rounded up to full pages */
	npages = size >> PAGE_SHIFT;
	order = get_order(size);

	/* alloc enough pages (and possibly more) */
	ret = (void *)__get_free_pages(flag, order);
	if (!ret)
		goto error;
	memset(ret, 0, size);

	if (translate_phb(to_pci_dev(dev))) {
		/* set up tces to cover the allocated range */
		mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL);
		if (mapping == bad_dma_address)
			goto free;

		*dma_handle = mapping;
	} else /* non translated slot */
		*dma_handle = virt_to_bus(ret);

	return ret;

free:
	free_pages((unsigned long)ret, get_order(size));
	ret = NULL;
error:
	return ret;
}

static const struct dma_mapping_ops calgary_dma_ops = {
	.alloc_coherent = calgary_alloc_coherent,
	.map_single = calgary_map_single,
	.unmap_single = calgary_unmap_single,
	.map_sg = calgary_map_sg,
	.unmap_sg = calgary_unmap_sg,
};

static inline void __iomem * busno_to_bbar(unsigned char num)
{
	return bus_info[num].bbar;
}

static inline int busno_to_phbid(unsigned char num)
{
	return bus_info[num].phbid;
}

static inline unsigned long split_queue_offset(unsigned char num)
{
	size_t idx = busno_to_phbid(num);

	return split_queue_offsets[idx];
}

static inline unsigned long tar_offset(unsigned char num)
{
	size_t idx = busno_to_phbid(num);

	return tar_offsets[idx];
}

static inline unsigned long phb_offset(unsigned char num)
{
	size_t idx = busno_to_phbid(num);

	return phb_offsets[idx];
}

static inline void __iomem* calgary_reg(void __iomem *bar, unsigned long offset)
{
	unsigned long target = ((unsigned long)bar) | offset;
	return (void __iomem*)target;
}

static inline int is_calioc2(unsigned short device)
{
	return (device == PCI_DEVICE_ID_IBM_CALIOC2);
}

static inline int is_calgary(unsigned short device)
{
	return (device == PCI_DEVICE_ID_IBM_CALGARY);
}

static inline int is_cal_pci_dev(unsigned short device)
{
	return (is_calgary(device) || is_calioc2(device));
}

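/*
 * Flush the Calgary TCE cache for this PHB: disable bus arbitration
 * to quiesce DMA, wait for the split queues to drain, rewrite the TAR
 * to invalidate the cache, then re-enable arbitration.
 */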
static void calgary_tce_cache_blast(struct iommu_table *tbl)
{
	u64 val;
	u32 aer;
	int i = 0;
	void __iomem *bbar = tbl->bbar;
	void __iomem *target;

	/* disable arbitration on the bus */
	target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET);
	aer = readl(target);
	writel(0, target);

	/* read plssr to ensure it got there */
	target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_PLSSR_OFFSET);
	val = readl(target);

	/* poll split queues until all DMA activity is done */
	target = calgary_reg(bbar, split_queue_offset(tbl->it_busno));
	do {
		val = readq(target);
		i++;
	} while ((val & 0xff) != 0xff && i < 100);
	if (i == 100)
		printk(KERN_WARNING "Calgary: PCI bus not quiesced, "
		       "continuing anyway\n");

	/* invalidate TCE cache */
	target = calgary_reg(bbar, tar_offset(tbl->it_busno));
	writeq(tbl->tar_val, target);

	/* enable arbitration */
	target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET);
	writel(aer, target);
	(void)readl(target); /* flush */
}

static void calioc2_tce_cache_blast(struct iommu_table *tbl)
{
	void __iomem *bbar = tbl->bbar;
	void __iomem *target;
	u64 val64;
	u32 val;
	int i = 0;
	int count = 1;
	unsigned char bus = tbl->it_busno;

begin:
	printk(KERN_DEBUG "Calgary: CalIOC2 bus 0x%x entering tce cache blast "
	       "sequence - count %d\n", bus, count);

	/* 1. using the Page Migration Control reg set SoftStop */
	target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
	val = be32_to_cpu(readl(target));
	printk(KERN_DEBUG "1a. read 0x%x [LE] from %p\n", val, target);
	val |= PMR_SOFTSTOP;
	printk(KERN_DEBUG "1b. writing 0x%x [LE] to %p\n", val, target);
	writel(cpu_to_be32(val), target);

	/* 2. poll split queues until all DMA activity is done */
	printk(KERN_DEBUG "2a. starting to poll split queues\n");
	target = calgary_reg(bbar, split_queue_offset(bus));
	do {
		val64 = readq(target);
		i++;
	} while ((val64 & 0xff) != 0xff && i < 100);
	if (i == 100)
		printk(KERN_WARNING "CalIOC2: PCI bus not quiesced, "
		       "continuing anyway\n");

	/* 3. poll Page Migration DEBUG for SoftStopFault */
	target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_DEBUG);
	val = be32_to_cpu(readl(target));
	printk(KERN_DEBUG "3. read 0x%x [LE] from %p\n", val, target);

	/* 4. if SoftStopFault - goto (1) */
	if (val & PMR_SOFTSTOPFAULT) {
		if (++count < 100)
			goto begin;
		else {
			printk(KERN_WARNING "CalIOC2: too many SoftStopFaults, "
			       "aborting TCE cache flush sequence!\n");
			return; /* pray for the best */
		}
	}

	/* 5. Slam into HardStop by reading PHB_PAGE_MIG_CTRL */
	target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
	printk(KERN_DEBUG "5a. slamming into HardStop by reading %p\n", target);
	val = be32_to_cpu(readl(target));
	printk(KERN_DEBUG "5b. read 0x%x [LE] from %p\n", val, target);
	target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_DEBUG);
	val = be32_to_cpu(readl(target));
	printk(KERN_DEBUG "5c. read 0x%x [LE] from %p (debug)\n", val, target);

	/* 6. invalidate TCE cache */
	printk(KERN_DEBUG "6. invalidating TCE cache\n");
	target = calgary_reg(bbar, tar_offset(bus));
	writeq(tbl->tar_val, target);

	/* 7. Re-read PMCR */
	printk(KERN_DEBUG "7a. Re-reading PMCR\n");
	target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
	val = be32_to_cpu(readl(target));
	printk(KERN_DEBUG "7b. read 0x%x [LE] from %p\n", val, target);

	/* 8. Remove HardStop */
	printk(KERN_DEBUG "8a. removing HardStop from PMCR\n");
	target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
	val = 0;
	printk(KERN_DEBUG "8b. writing 0x%x [LE] to %p\n", val, target);
	writel(cpu_to_be32(val), target);
	val = be32_to_cpu(readl(target));
	printk(KERN_DEBUG "8c. read 0x%x [LE] from %p\n", val, target);
}

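/*
 * Reserve a peripheral memory window in the IOMMU bitmap.  limit is
 * the last byte of the window, rounded up here to the next 1MB
 * boundary, so the whole window is taken out of the DMA allocator.
 */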
static void __init calgary_reserve_mem_region(struct pci_dev *dev, u64 start,
	u64 limit)
{
	unsigned int numpages;

	limit = limit | 0xfffff;
	limit++;

	numpages = ((limit - start) >> PAGE_SHIFT);
	iommu_range_reserve(pci_iommu(dev->bus), start, numpages);
}

static void __init calgary_reserve_peripheral_mem_1(struct pci_dev *dev)
{
	void __iomem *target;
	u64 low, high, sizelow;
	u64 start, limit;
	struct iommu_table *tbl = pci_iommu(dev->bus);
	unsigned char busnum = dev->bus->number;
	void __iomem *bbar = tbl->bbar;

	/* peripheral MEM_1 region */
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_LOW);
	low = be32_to_cpu(readl(target));
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_HIGH);
	high = be32_to_cpu(readl(target));
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_SIZE);
	sizelow = be32_to_cpu(readl(target));

	start = (high << 32) | low;
	limit = sizelow;

	calgary_reserve_mem_region(dev, start, limit);
}

static void __init calgary_reserve_peripheral_mem_2(struct pci_dev *dev)
{
	void __iomem *target;
	u32 val32;
	u64 low, high, sizelow, sizehigh;
	u64 start, limit;
	struct iommu_table *tbl = pci_iommu(dev->bus);
	unsigned char busnum = dev->bus->number;
	void __iomem *bbar = tbl->bbar;

	/* is it enabled? */
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
	val32 = be32_to_cpu(readl(target));
	if (!(val32 & PHB_MEM2_ENABLE))
		return;

	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_LOW);
	low = be32_to_cpu(readl(target));
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_HIGH);
	high = be32_to_cpu(readl(target));
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_LOW);
	sizelow = be32_to_cpu(readl(target));
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_HIGH);
	sizehigh = be32_to_cpu(readl(target));

	start = (high << 32) | low;
	limit = (sizehigh << 32) | sizelow;

	calgary_reserve_mem_region(dev, start, limit);
}

/*
 * some regions of the IO address space do not get translated, so we
 * must not give devices IO addresses in those regions. The regions
 * are the 640KB-1MB region and the two PCI peripheral memory holes.
 * Reserve all of them in the IOMMU bitmap to avoid giving them out
 * later.
 */
static void __init calgary_reserve_regions(struct pci_dev *dev)
{
	unsigned int npages;
	u64 start;
	struct iommu_table *tbl = pci_iommu(dev->bus);

	/* reserve EMERGENCY_PAGES from bad_dma_address and up */
	iommu_range_reserve(tbl, bad_dma_address, EMERGENCY_PAGES);

	/* avoid the BIOS/VGA first 640KB-1MB region */
	/* for CalIOC2 - avoid the entire first MB */
	if (is_calgary(dev->device)) {
		start = (640 * 1024);
		npages = ((1024 - 640) * 1024) >> PAGE_SHIFT;
	} else { /* calioc2 */
		start = 0;
		npages = (1 * 1024 * 1024) >> PAGE_SHIFT;
	}
	iommu_range_reserve(tbl, start, npages);

	/* reserve the two PCI peripheral memory regions in IO space */
	calgary_reserve_peripheral_mem_1(dev);
	calgary_reserve_peripheral_mem_2(dev);
}

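/*
 * Set up translation for one PHB: build its TCE table, pick the
 * Calgary/CalIOC2 chip ops, reserve the regions that must never be
 * handed out, then program the Table Address Register with the
 * table's physical address and size encoding.
 */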
static int __init calgary_setup_tar(struct pci_dev *dev, void __iomem *bbar)
{
	u64 val64;
	u64 table_phys;
	void __iomem *target;
	int ret;
	struct iommu_table *tbl;

	/* build TCE tables for each PHB */
	ret = build_tce_table(dev, bbar);
	if (ret)
		return ret;

	tbl = pci_iommu(dev->bus);
	tbl->it_base = (unsigned long)bus_info[dev->bus->number].tce_space;
	tce_free(tbl, 0, tbl->it_size);

	if (is_calgary(dev->device))
		tbl->chip_ops = &calgary_chip_ops;
	else if (is_calioc2(dev->device))
		tbl->chip_ops = &calioc2_chip_ops;
	else
		BUG();

	calgary_reserve_regions(dev);

	/* set TARs for each PHB */
	target = calgary_reg(bbar, tar_offset(dev->bus->number));
	val64 = be64_to_cpu(readq(target));

	/* zero out all TAR bits under sw control */
	val64 &= ~TAR_SW_BITS;
	table_phys = (u64)__pa(tbl->it_base);

	val64 |= table_phys;

	BUG_ON(specified_table_size > TCE_TABLE_SIZE_8M);
	val64 |= (u64) specified_table_size;

	tbl->tar_val = cpu_to_be64(val64);

	writeq(tbl->tar_val, target);
	readq(target); /* flush */

	return 0;
}

static void __init calgary_free_bus(struct pci_dev *dev)
{
	u64 val64;
	struct iommu_table *tbl = pci_iommu(dev->bus);
	void __iomem *target;
	unsigned int bitmapsz;

	target = calgary_reg(tbl->bbar, tar_offset(dev->bus->number));
	val64 = be64_to_cpu(readq(target));
	val64 &= ~TAR_SW_BITS;
	writeq(cpu_to_be64(val64), target);
	readq(target); /* flush */

	bitmapsz = tbl->it_size / BITS_PER_BYTE;
	free_pages((unsigned long)tbl->it_map, get_order(bitmapsz));
	tbl->it_map = NULL;

	kfree(tbl);
	set_pci_iommu(dev->bus, NULL);

	/* Can't free bootmem allocated memory after system is up :-( */
	bus_info[dev->bus->number].tce_space = NULL;
}

static void calgary_dump_error_regs(struct iommu_table *tbl)
{
	void __iomem *bbar = tbl->bbar;
	void __iomem *target;
	u32 csr, plssr;

	target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_CSR_OFFSET);
	csr = be32_to_cpu(readl(target));

	target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_PLSSR_OFFSET);
	plssr = be32_to_cpu(readl(target));

	/* If no error, the agent ID in the CSR is not valid */
	printk(KERN_EMERG "Calgary: DMA error on Calgary PHB 0x%x, "
	       "0x%08x@CSR 0x%08x@PLSSR\n", tbl->it_busno, csr, plssr);
}

static void calioc2_dump_error_regs(struct iommu_table *tbl)
{
	void __iomem *bbar = tbl->bbar;
	u32 csr, csmr, plssr, mck, rcstat;
	void __iomem *target;
	unsigned long phboff = phb_offset(tbl->it_busno);
	unsigned long erroff;
	u32 errregs[7];
	int i;

	/* dump CSR */
	target = calgary_reg(bbar, phboff | PHB_CSR_OFFSET);
	csr = be32_to_cpu(readl(target));
	/* dump PLSSR */
	target = calgary_reg(bbar, phboff | PHB_PLSSR_OFFSET);
	plssr = be32_to_cpu(readl(target));
	/* dump CSMR */
	target = calgary_reg(bbar, phboff | 0x290);
	csmr = be32_to_cpu(readl(target));
	/* dump mck */
	target = calgary_reg(bbar, phboff | 0x800);
	mck = be32_to_cpu(readl(target));

	printk(KERN_EMERG "Calgary: DMA error on CalIOC2 PHB 0x%x\n",
	       tbl->it_busno);

	printk(KERN_EMERG "Calgary: 0x%08x@CSR 0x%08x@PLSSR 0x%08x@CSMR 0x%08x@MCK\n",
	       csr, plssr, csmr, mck);

	/* dump rest of error regs */
	printk(KERN_EMERG "Calgary: ");
	for (i = 0; i < ARRAY_SIZE(errregs); i++) {
		/* err regs are at 0x810 - 0x870 */
		erroff = (0x810 + (i * 0x10));
		target = calgary_reg(bbar, phboff | erroff);
		errregs[i] = be32_to_cpu(readl(target));
		printk("0x%08x@0x%lx ", errregs[i], erroff);
	}
	printk("\n");

	/* root complex status */
	target = calgary_reg(bbar, phboff | PHB_ROOT_COMPLEX_STATUS);
	rcstat = be32_to_cpu(readl(target));
	printk(KERN_EMERG "Calgary: 0x%08x@0x%x\n", rcstat,
	       PHB_ROOT_COMPLEX_STATUS);
}

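/*
 * Per-PHB watchdog, re-armed every 2 seconds.  A non-zero agent ID in
 * the CSR means a DMA error occurred: dump the chip's error registers,
 * clear the error and fence off the PHB with PHB_SLOT_DISABLE.
 */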
static void calgary_watchdog(unsigned long data)
{
	struct pci_dev *dev = (struct pci_dev *)data;
	struct iommu_table *tbl = pci_iommu(dev->bus);
	void __iomem *bbar = tbl->bbar;
	u32 val32;
	void __iomem *target;

	target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_CSR_OFFSET);
	val32 = be32_to_cpu(readl(target));

	/* If no error, the agent ID in the CSR is not valid */
	if (val32 & CSR_AGENT_MASK) {
		tbl->chip_ops->dump_error_regs(tbl);

		/* reset error */
		writel(0, target);

		/* Disable bus that caused the error */
		target = calgary_reg(bbar, phb_offset(tbl->it_busno) |
				     PHB_CONFIG_RW_OFFSET);
		val32 = be32_to_cpu(readl(target));
		val32 |= PHB_SLOT_DISABLE;
		writel(cpu_to_be32(val32), target);
		readl(target); /* flush */
	} else {
		/* Reset the timer */
		mod_timer(&tbl->watchdog_timer, jiffies + 2 * HZ);
	}
}

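/*
 * Each PHB has a 4-bit split completion timeout field in the Calgary
 * Configuration Register; phb_shift below locates that field for the
 * given PHB.  Clear the field and write the new timeout value.
 */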
static void __init calgary_set_split_completion_timeout(void __iomem *bbar,
	unsigned char busnum, unsigned long timeout)
{
	u64 val64;
	void __iomem *target;
	unsigned int phb_shift = ~0; /* silence gcc */
	u64 mask;

	switch (busno_to_phbid(busnum)) {
	case 0: phb_shift = (63 - 19);
		break;
	case 1: phb_shift = (63 - 23);
		break;
	case 2: phb_shift = (63 - 27);
		break;
	case 3: phb_shift = (63 - 35);
		break;
	default:
		BUG_ON(busno_to_phbid(busnum));
	}

	target = calgary_reg(bbar, CALGARY_CONFIG_REG);
	val64 = be64_to_cpu(readq(target));

	/* zero out this PHB's timer bits */
	mask = ~(0xFUL << phb_shift);
	val64 &= mask;
	val64 |= (timeout << phb_shift);
	writeq(cpu_to_be64(val64), target);
	readq(target); /* flush */
}

static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
{
	unsigned char busnum = dev->bus->number;
	void __iomem *bbar = tbl->bbar;
	void __iomem *target;
	u32 val;

	/*
	 * CalIOC2 designers recommend setting bit 8 in 0xnDB0 to 1
	 */
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_SAVIOR_L2);
	val = cpu_to_be32(readl(target));
	val |= 0x00800000;
	writel(cpu_to_be32(val), target);
}

static void calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
{
	unsigned char busnum = dev->bus->number;

	/*
	 * Give split completion a longer timeout on bus 1 for aic94xx
	 * http://bugzilla.kernel.org/show_bug.cgi?id=7180
	 */
	if (is_calgary(dev->device) && (busnum == 1))
		calgary_set_split_completion_timeout(tbl->bbar, busnum,
						     CCR_2SEC_TIMEOUT);
}

static void __init calgary_enable_translation(struct pci_dev *dev)
{
	u32 val32;
	unsigned char busnum;
	void __iomem *target;
	void __iomem *bbar;
	struct iommu_table *tbl;

	busnum = dev->bus->number;
	tbl = pci_iommu(dev->bus);
	bbar = tbl->bbar;

	/* enable TCE in PHB Config Register */
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
	val32 = be32_to_cpu(readl(target));
	val32 |= PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE;

	printk(KERN_INFO "Calgary: enabling translation on %s PHB %#x\n",
	       (dev->device == PCI_DEVICE_ID_IBM_CALGARY) ?
	       "Calgary" : "CalIOC2", busnum);
	printk(KERN_INFO "Calgary: errant DMAs will now be prevented on this "
	       "bus.\n");

	writel(cpu_to_be32(val32), target);
	readl(target); /* flush */

	init_timer(&tbl->watchdog_timer);
	tbl->watchdog_timer.function = &calgary_watchdog;
	tbl->watchdog_timer.data = (unsigned long)dev;
	mod_timer(&tbl->watchdog_timer, jiffies);
}

static void __init calgary_disable_translation(struct pci_dev *dev)
{
	u32 val32;
	unsigned char busnum;
	void __iomem *target;
	void __iomem *bbar;
	struct iommu_table *tbl;

	busnum = dev->bus->number;
	tbl = pci_iommu(dev->bus);
	bbar = tbl->bbar;

	/* disable TCE in PHB Config Register */
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
	val32 = be32_to_cpu(readl(target));
	val32 &= ~(PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE);

	printk(KERN_INFO "Calgary: disabling translation on PHB %#x!\n", busnum);
	writel(cpu_to_be32(val32), target);
	readl(target); /* flush */

	del_timer_sync(&tbl->watchdog_timer);
}

static void __init calgary_init_one_nontranslated(struct pci_dev *dev)
{
	pci_dev_get(dev);
	set_pci_iommu(dev->bus, NULL);

	/* is the device behind a bridge? */
	if (dev->bus->parent)
		dev->bus->parent->self = dev;
	else
		dev->bus->self = dev;
}

static int __init calgary_init_one(struct pci_dev *dev)
{
	void __iomem *bbar;
	struct iommu_table *tbl;
	int ret;

	BUG_ON(dev->bus->number >= MAX_PHB_BUS_NUM);

	bbar = busno_to_bbar(dev->bus->number);
	ret = calgary_setup_tar(dev, bbar);
	if (ret)
		goto done;

	pci_dev_get(dev);

	if (dev->bus->parent) {
		if (dev->bus->parent->self)
			printk(KERN_WARNING "Calgary: IEEEE, dev %p has "
			       "bus->parent->self!\n", dev);
		dev->bus->parent->self = dev;
	} else
		dev->bus->self = dev;

	tbl = pci_iommu(dev->bus);
	tbl->chip_ops->handle_quirks(tbl, dev);

	calgary_enable_translation(dev);

	return 0;

done:
	return ret;
}

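/*
 * For every Calgary/CalIOC2 listed in the Rio Grande table, ioremap
 * its 1MB of chip space and record the BBAR mapping and owning PHB
 * for each PCI bus number behind it.  The bus range served by a PHB
 * is read from that PHB's debug "STUFF" register.
 */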
static int __init calgary_locate_bbars(void)
{
	int ret;
	int rioidx, phb, bus;
	void __iomem *bbar;
	void __iomem *target;
	unsigned long offset;
	u8 start_bus, end_bus;
	u32 val;

	ret = -ENODATA;
	for (rioidx = 0; rioidx < rio_table_hdr->num_rio_dev; rioidx++) {
		struct rio_detail *rio = rio_devs[rioidx];

		if ((rio->type != COMPAT_CALGARY) && (rio->type != ALT_CALGARY))
			continue;

		/* map entire 1MB of Calgary config space */
		bbar = ioremap_nocache(rio->BBAR, 1024 * 1024);
		if (!bbar)
			goto error;

		for (phb = 0; phb < PHBS_PER_CALGARY; phb++) {
			offset = phb_debug_offsets[phb] | PHB_DEBUG_STUFF_OFFSET;
			target = calgary_reg(bbar, offset);

			val = be32_to_cpu(readl(target));

			start_bus = (u8)((val & 0x00FF0000) >> 16);
			end_bus = (u8)((val & 0x0000FF00) >> 8);

			if (end_bus) {
				for (bus = start_bus; bus <= end_bus; bus++) {
					bus_info[bus].bbar = bbar;
					bus_info[bus].phbid = phb;
				}
			} else {
				bus_info[start_bus].bbar = bbar;
				bus_info[start_bus].phbid = phb;
			}
		}
	}

	return 0;

error:
	/* scan bus_info and iounmap any bbars we previously ioremap'd */
	for (bus = 0; bus < ARRAY_SIZE(bus_info); bus++)
		if (bus_info[bus].bbar)
			iounmap(bus_info[bus].bbar);

	return ret;
}

static int __init calgary_init(void)
{
	int ret;
	struct pci_dev *dev = NULL;
	void *tce_space;

	ret = calgary_locate_bbars();
	if (ret)
		return ret;

	do {
		dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev);
		if (!dev)
			break;
		if (!is_cal_pci_dev(dev->device))
			continue;
		if (!translate_phb(dev)) {
			calgary_init_one_nontranslated(dev);
			continue;
		}
		tce_space = bus_info[dev->bus->number].tce_space;
		if (!tce_space && !translate_empty_slots)
			continue;

		ret = calgary_init_one(dev);
		if (ret)
			goto error;
	} while (1);

	return ret;

error:
	do {
		dev = pci_get_device_reverse(PCI_VENDOR_ID_IBM,
					     PCI_ANY_ID, dev);
		if (!dev)
			break;
		if (!is_cal_pci_dev(dev->device))
			continue;
		if (!translate_phb(dev)) {
			pci_dev_put(dev);
			continue;
		}
		if (!bus_info[dev->bus->number].tce_space && !translate_empty_slots)
			continue;

		calgary_disable_translation(dev);
		calgary_free_bus(dev);
		pci_dev_put(dev); /* Undo calgary_init_one()'s pci_dev_get() */
	} while (1);

	return ret;
}

static inline int __init determine_tce_table_size(u64 ram)
{
	int ret;

	if (specified_table_size != TCE_TABLE_SIZE_UNSPECIFIED)
		return specified_table_size;

	/*
	 * Table sizes are from 0 to 7 (TCE_TABLE_SIZE_64K to
	 * TCE_TABLE_SIZE_8M). Table size 0 has 8K entries and each
	 * larger table size has twice as many entries, so shift the
	 * max ram address by 13 to divide by 8K and then look at the
	 * order of the result to choose between 0-7.
	 */
	ret = get_order(ram >> 13);
	if (ret > TCE_TABLE_SIZE_8M)
		ret = TCE_TABLE_SIZE_8M;

	return ret;
}

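/*
 * Parse the Rio Grande table found by detect_calgary() into the
 * scal_devs[] and rio_devs[] arrays.  Record sizes differ between
 * table versions 2 and 3.
 */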
static int __init build_detail_arrays(void)
{
	unsigned long ptr;
	int i, scal_detail_size, rio_detail_size;

	if (rio_table_hdr->num_scal_dev > MAX_NUMNODES){
		printk(KERN_WARNING
			"Calgary: MAX_NUMNODES too low! Defined as %d, "
			"but system has %d nodes.\n",
			MAX_NUMNODES, rio_table_hdr->num_scal_dev);
		return -ENODEV;
	}

	switch (rio_table_hdr->version){
	case 2:
		scal_detail_size = 11;
		rio_detail_size = 13;
		break;
	case 3:
		scal_detail_size = 12;
		rio_detail_size = 15;
		break;
	default:
		printk(KERN_WARNING
		       "Calgary: Invalid Rio Grande Table Version: %d\n",
		       rio_table_hdr->version);
		return -EPROTO;
	}

	ptr = ((unsigned long)rio_table_hdr) + 3;
	for (i = 0; i < rio_table_hdr->num_scal_dev;
		    i++, ptr += scal_detail_size)
		scal_devs[i] = (struct scal_detail *)ptr;

	for (i = 0; i < rio_table_hdr->num_rio_dev;
		    i++, ptr += rio_detail_size)
		rio_devs[i] = (struct rio_detail *)ptr;

	return 0;
}

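/*
 * Probe config space of devices 1-7 on the bus to see whether anything
 * is populated behind this PHB; buses with no devices only get a TCE
 * table when translate_empty_slots is set.
 */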
static int __init calgary_bus_has_devices(int bus, unsigned short pci_dev)
{
	int dev;
	u32 val;

	if (pci_dev == PCI_DEVICE_ID_IBM_CALIOC2) {
		/*
		 * FIXME: properly scan for devices across the
		 * PCI-to-PCI bridge on every CalIOC2 port.
		 */
		return 1;
	}

	for (dev = 1; dev < 8; dev++) {
		val = read_pci_config(bus, dev, 0, 0);
		if (val != 0xffffffff)
			break;
	}
	return (val != 0xffffffff);
}

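/*
 * Early boot detection: locate the Rio Grande table in the BIOS EBDA,
 * build the RIO detail arrays and allocate bootmem TCE tables for
 * every Calgary bus that has (or may have) devices behind it.  Sets
 * calgary_detected so calgary_iommu_init() knows to take over DMA.
 */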
void __init detect_calgary(void)
{
	int bus;
	void *tbl;
	int calgary_found = 0;
	unsigned long ptr;
	unsigned int offset, prev_offset;
	int ret;

	/*
	 * if the user specified iommu=off or iommu=soft or we found
	 * another HW IOMMU already, bail out.
	 */
	if (swiotlb || no_iommu || iommu_detected)
		return;

	if (!use_calgary)
		return;

	if (!early_pci_allowed())
		return;

	printk(KERN_DEBUG "Calgary: detecting Calgary via BIOS EBDA area\n");

	ptr = (unsigned long)phys_to_virt(get_bios_ebda());

	rio_table_hdr = NULL;
	prev_offset = 0;
	offset = 0x180;
	/*
	 * The next offset is stored in the 1st word.
	 * Only parse up until the offset increases:
	 */
	while (offset > prev_offset) {
		/* The block id is stored in the 2nd word */
		if (*((unsigned short *)(ptr + offset + 2)) == 0x4752){
			/* set the pointer past the offset & block id */
			rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4);
			break;
		}
		prev_offset = offset;
		offset = *((unsigned short *)(ptr + offset));
	}
	if (!rio_table_hdr) {
		printk(KERN_DEBUG "Calgary: Unable to locate Rio Grande table "
		       "in EBDA - bailing!\n");
		return;
	}

	ret = build_detail_arrays();
	if (ret) {
		printk(KERN_DEBUG "Calgary: build_detail_arrays ret %d\n", ret);
		return;
	}

	specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE);

	for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
		struct calgary_bus_info *info = &bus_info[bus];
		unsigned short pci_device;
		u32 val;

		val = read_pci_config(bus, 0, 0, 0);
		pci_device = (val & 0xFFFF0000) >> 16;

		if (!is_cal_pci_dev(pci_device))
			continue;

		if (info->translation_disabled)
			continue;

		if (calgary_bus_has_devices(bus, pci_device) ||
		    translate_empty_slots) {
			tbl = alloc_tce_table();
			if (!tbl)
				goto cleanup;
			info->tce_space = tbl;
			calgary_found = 1;
		}
	}

	printk(KERN_DEBUG "Calgary: finished detection, Calgary %s\n",
	       calgary_found ? "found" : "not found");

	if (calgary_found) {
		iommu_detected = 1;
		calgary_detected = 1;
		printk(KERN_INFO "PCI-DMA: Calgary IOMMU detected.\n");
		printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d, "
		       "CONFIG_IOMMU_DEBUG is %s.\n", specified_table_size,
		       debugging ? "enabled" : "disabled");
	}
	return;

cleanup:
	for (--bus; bus >= 0; --bus) {
		struct calgary_bus_info *info = &bus_info[bus];

		if (info->tce_space)
			free_tce_table(info->tce_space);
	}
}

int __init calgary_iommu_init(void)
{
	int ret;

	if (no_iommu || swiotlb)
		return -ENODEV;

	if (!calgary_detected)
		return -ENODEV;

	/* ok, we're trying to use Calgary - let's roll */
	printk(KERN_INFO "PCI-DMA: Using Calgary IOMMU\n");

	ret = calgary_init();
	if (ret) {
		printk(KERN_ERR "PCI-DMA: Calgary init failed %d, "
		       "falling back to no_iommu\n", ret);
		if (end_pfn > MAX_DMA32_PFN)
			printk(KERN_ERR "WARNING more than 4GB of memory, "
					"32bit PCI may malfunction.\n");
		return ret;
	}

	force_iommu = 1;
	bad_dma_address = 0x0;
	dma_ops = &calgary_dma_ops;

	return 0;
}

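/*
 * Parse the "calgary=" boot options: a TCE table size (64k, 128k,
 * 256k, 512k, 1M, 2M, 4M or 8M), "translate_empty_slots" and
 * "disable=<busnum>" to turn off translation for one PHB.  Options
 * are comma separated.
 */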
static int __init calgary_parse_options(char *p)
{
	unsigned int bridge;
	size_t len;
	char* endp;

	while (*p) {
		if (!strncmp(p, "64k", 3))
			specified_table_size = TCE_TABLE_SIZE_64K;
		else if (!strncmp(p, "128k", 4))
			specified_table_size = TCE_TABLE_SIZE_128K;
		else if (!strncmp(p, "256k", 4))
			specified_table_size = TCE_TABLE_SIZE_256K;
		else if (!strncmp(p, "512k", 4))
			specified_table_size = TCE_TABLE_SIZE_512K;
		else if (!strncmp(p, "1M", 2))
			specified_table_size = TCE_TABLE_SIZE_1M;
		else if (!strncmp(p, "2M", 2))
			specified_table_size = TCE_TABLE_SIZE_2M;
		else if (!strncmp(p, "4M", 2))
			specified_table_size = TCE_TABLE_SIZE_4M;
		else if (!strncmp(p, "8M", 2))
			specified_table_size = TCE_TABLE_SIZE_8M;

		len = strlen("translate_empty_slots");
		if (!strncmp(p, "translate_empty_slots", len))
			translate_empty_slots = 1;

		len = strlen("disable");
		if (!strncmp(p, "disable", len)) {
			p += len;
			if (*p == '=')
				++p;
			if (*p == '\0')
				break;
			bridge = simple_strtol(p, &endp, 0);
			if (p == endp)
				break;

			if (bridge < MAX_PHB_BUS_NUM) {
				printk(KERN_INFO "Calgary: disabling "
				       "translation for PHB %#x\n", bridge);
				bus_info[bridge].translation_disabled = 1;
			}
		}

		p = strpbrk(p, ",");
		if (!p)
			break;

		p++; /* skip ',' */
	}
	return 1;
}
__setup("calgary=", calgary_parse_options);

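/*
 * Once PCI resources have been assigned, reserve every MEM window of
 * the bridge in the IOMMU bitmap so that TCEs are never allocated
 * over addresses that decode to peripheral memory.
 */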
static void __init calgary_fixup_one_tce_space(struct pci_dev *dev)
{
	struct iommu_table *tbl;
	unsigned int npages;
	int i;

	tbl = pci_iommu(dev->bus);

	for (i = 0; i < 4; i++) {
		struct resource *r = &dev->resource[PCI_BRIDGE_RESOURCES + i];

		/* Don't give out TCEs that map MEM resources */
		if (!(r->flags & IORESOURCE_MEM))
			continue;

		/* 0-based? we reserve the whole 1st MB anyway */
		if (!r->start)
			continue;

		/* cover the whole region */
		npages = (r->end - r->start) >> PAGE_SHIFT;
		npages++;

		iommu_range_reserve(tbl, r->start, npages);
	}
}

static int __init calgary_fixup_tce_spaces(void)
{
	struct pci_dev *dev = NULL;
	void *tce_space;

	if (no_iommu || swiotlb || !calgary_detected)
		return -ENODEV;

	printk(KERN_DEBUG "Calgary: fixing up tce spaces\n");

	do {
		dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev);
		if (!dev)
			break;
		if (!is_cal_pci_dev(dev->device))
			continue;
		if (!translate_phb(dev))
			continue;

		tce_space = bus_info[dev->bus->number].tce_space;
		if (!tce_space)
			continue;

		calgary_fixup_one_tce_space(dev);

	} while (1);

	return 0;
}

/*
 * We need to be called after pcibios_assign_resources (fs_initcall level)
 * and before device_initcall.
 */
rootfs_initcall(calgary_fixup_tce_spaces);