/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))
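
/* Per-cpu state used to batch IOMMU mappings: physical page
 * addresses accumulate in pglist and are pushed to the hypervisor
 * in a single pci_sun4v_iommu_map() call per flush.
 */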
struct pci_iommu_batch {
	struct pci_dev	*pdev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch);

/* Interrupts must be disabled.  */
static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	p->pdev		= pdev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
{
	struct pcidev_cookie *pcp = p->pdev->sysdata;
	unsigned long devhandle = pcp->pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("pci_iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

/* Interrupts must be disabled.  */
static inline long pci_iommu_batch_add(u64 phys_page)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return pci_iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long pci_iommu_batch_end(void)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return pci_iommu_batch_flush(p);
}

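/* Allocate a contiguous range of 'npages' entries from the arena
 * bitmap.  The search begins at the rotating hint and wraps around
 * once before failing; returns the first entry index, or -1.
 */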
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}

static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}

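/* Allocate DMA-coherent memory: get pages and zero them, reserve
 * IOMMU entries for them, then batch-map them read/write via the
 * hypervisor with interrupts disabled.
 */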
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, first_page, npages, n;
	void *ret;
	long entry;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	first_page = __get_free_pages(gfp, order);
	if (unlikely(first_page == 0UL))
		return NULL;

	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto arena_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	pci_iommu_batch_start(pdev,
			      (HV_PCI_MAP_ATTR_READ |
			       HV_PCI_MAP_ATTR_WRITE),
			      entry);

	for (n = 0; n < npages; n++) {
		long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

arena_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

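/* Inverse of pci_4v_alloc_consistent(): release the arena entries,
 * demap them through the hypervisor, then free the pages.
 */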
static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

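/* Map a single, physically contiguous buffer for streaming DMA.
 * Write permission is granted unless the direction is
 * PCI_DMA_TODEVICE.
 */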
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = pci_iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return PCI_DMA_ERROR_CODE;
}

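/* Tear down a single streaming mapping: return the entries to the
 * arena and demap them via the hypervisor.
 */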
static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

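/* Walk the scatterlist and emit one IOMMU mapping per IO page,
 * merging entries that were coalesced into the same DMA segment.
 * Returns 0 on success, -1 if a hypervisor map call fails.
 */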
static inline long fill_sg(long entry, struct pci_dev *pdev,
			   struct scatterlist *sg,
			   int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				long err;

				err = pci_iommu_batch_add(pteval);
				if (unlikely(err < 0L))
					goto iommu_map_failed;

				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);
	return 0;

iommu_map_failed:
	local_irq_restore(flags);
	return -1L;
}

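/* Map a scatterlist.  Single-entry lists take the map_single fast
 * path; otherwise the list is coalesced by prepare_sg(), a block of
 * arena entries is allocated, and fill_sg() creates the mappings.
 */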
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, prot;
	u32 dma_base;
	struct scatterlist *sgtmp;
	long entry, err;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4v_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	
	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	err = fill_sg(entry, pdev, sglist, used, nelems, prot);
	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

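/* Unmap a scatterlist: compute the page span from the DMA segments,
 * then free the arena entries and demap them via the hypervisor.
 */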
static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, i, npages;
	long entry;
	u32 devhandle, bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;
	
	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	/* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	/* Nothing to do... */
}

struct pci_iommu_ops pci_sun4v_iommu_ops = {
	.alloc_consistent		= pci_4v_alloc_consistent,
	.free_consistent		= pci_4v_free_consistent,
	.map_single			= pci_4v_map_single,
	.unmap_single			= pci_4v_unmap_single,
	.map_sg				= pci_4v_map_sg,
	.unmap_sg			= pci_4v_unmap_sg,
	.dma_sync_single_for_cpu	= pci_4v_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu		= pci_4v_dma_sync_sg_for_cpu,
};

/* SUN4V PCI configuration space accessors. */

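/* Hash table of devices known to OBP, keyed by devhandle and
 * bus/device/function.  Config space accesses are only performed
 * for devices present in this table.
 */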
struct pdev_entry {
	struct pdev_entry	*next;
	u32			devhandle;
	unsigned int		bus;
	unsigned int		device;
	unsigned int		func;
};

#define PDEV_HTAB_SIZE	16
#define PDEV_HTAB_MASK	(PDEV_HTAB_SIZE - 1)
static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];

static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
	unsigned int val;

	val = (devhandle ^ (devhandle >> 4));
	val ^= bus;
	val ^= device;
	val ^= func;

	return val & PDEV_HTAB_MASK;
}

static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
	struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
	struct pdev_entry **slot;

	if (!p)
		return -ENOMEM;

	slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
	p->next = *slot;
	*slot = p;

	p->devhandle = devhandle;
	p->bus = bus;
	p->device = device;
	p->func = func;

	return 0;
}

/* Recursively descend into the OBP device tree, rooted at toplevel_node,
 * looking for a PCI device matching bus and devfn.
 */
static int obp_find(struct device_node *toplevel_node, unsigned int bus, unsigned int devfn)
{
	toplevel_node = toplevel_node->child;

	while (toplevel_node != NULL) {
		struct linux_prom_pci_registers *regs;
		struct property *prop;
		int ret;

		ret = obp_find(toplevel_node, bus, devfn);
		if (ret != 0)
			return ret;

		prop = of_find_property(toplevel_node, "reg", NULL);
		if (!prop)
			goto next_sibling;

		regs = prop->value;
		if (((regs->phys_hi >> 16) & 0xff) == bus &&
		    ((regs->phys_hi >> 8) & 0xff) == devfn)
			break;

	next_sibling:
		toplevel_node = toplevel_node->sibling;
	}

	return toplevel_node != NULL;
}

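/* Walk every devfn on every bus behind this PBM and record the
 * devices that OBP knows about in the hash table.
 */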
static int pdev_htab_populate(struct pci_pbm_info *pbm)
{
	u32 devhandle = pbm->devhandle;
	unsigned int bus;

	for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
		unsigned int devfn;

		for (devfn = 0; devfn < 256; devfn++) {
			unsigned int device = PCI_SLOT(devfn);
			unsigned int func = PCI_FUNC(devfn);

			if (obp_find(pbm->prom_node, bus, devfn)) {
				int err = pdev_htab_add(devhandle, bus,
							device, func);
				if (err)
					return err;
			}
		}
	}

	return 0;
}

static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
	struct pdev_entry *p;

	p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
	while (p) {
		if (p->devhandle == devhandle &&
		    p->bus == bus &&
		    p->device == device &&
		    p->func == func)
			break;

		p = p->next;
	}

	return p;
}

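/* A config space access is out of range if the bus is outside this
 * PBM's range or the device was never recorded in the OBP hash
 * table.
 */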
static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
{
	if (bus < pbm->pci_first_busno ||
	    bus > pbm->pci_last_busno)
		return 1;

	return pdev_find(pbm->devhandle, bus, device, func) == NULL;
}

static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				  int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		ret = ~0UL;
	} else {
		ret = pci_sun4v_config_get(devhandle,
				HV_PCI_DEVICE_BUILD(bus, device, func),
				where, size);
#if 0
		printk("rcfg: [%x:%x:%x:%d]=[%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, ret);
#endif
	}
	switch (size) {
	case 1:
		*value = ret & 0xff;
		break;
	case 2:
		*value = ret & 0xffff;
		break;
	case 4:
		*value = ret & 0xffffffff;
		break;
	};

	return PCIBIOS_SUCCESSFUL;
}

static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				   int where, int size, u32 value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		/* Do nothing. */
	} else {
		ret = pci_sun4v_config_put(devhandle,
				HV_PCI_DEVICE_BUILD(bus, device, func),
				where, size, value);
#if 0
		printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, value, ret);
#endif
	}

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops pci_sun4v_ops = {
	.read =		pci_sun4v_read_pci_cfg,
	.write =	pci_sun4v_write_pci_cfg,
};

static void pbm_scan_bus(struct pci_controller_info *p,
			 struct pci_pbm_info *pbm)
{
	struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);

	if (!cookie) {
		prom_printf("%s: Critical allocation failure.\n", pbm->name);
		prom_halt();
	}

	/* All we care about is the PBM. */
	memset(cookie, 0, sizeof(*cookie));
	cookie->pbm = pbm;

	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);
#if 0
	pci_fixup_host_bridge_self(pbm->pci_bus);
	pbm->pci_bus->self->sysdata = cookie;
#endif
	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
	pci_record_assignments(pbm, pbm->pci_bus);
	pci_assign_unassigned(pbm, pbm->pci_bus);
	pci_fixup_irq(pbm, pbm->pci_bus);
	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
	pci_setup_busmastering(pbm, pbm->pci_bus);
}

static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
	struct property *prop;
	struct device_node *dp;

	if ((dp = p->pbm_A.prom_node) != NULL) {
		prop = of_find_property(dp, "66mhz-capable", NULL);
		p->pbm_A.is_66mhz_capable = (prop != NULL);

		pbm_scan_bus(p, &p->pbm_A);
	}
	if ((dp = p->pbm_B.prom_node) != NULL) {
		prop = of_find_property(dp, "66mhz-capable", NULL);
		p->pbm_B.is_66mhz_capable = (prop != NULL);

		pbm_scan_bus(p, &p->pbm_B);
	}

	/* XXX register error interrupt handlers XXX */
}

static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res, *root;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	/* XXX 64-bit MEM handling is not 100% correct... XXX */
	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else {
		root = &pbm->mem_space;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	reg = ((reg & size) |
	       (((u32)(res->start - root->start)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}

static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
				      struct resource *res,
				      struct resource *root)
{
	res->start += root->start;
	res->end += root->start;
}

/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
	int i, saw_mem, saw_io;

	saw_mem = saw_io = 0;
	for (i = 0; i < pbm->num_pbm_ranges; i++) {
		struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
		unsigned long a;
		int type;

		type = (pr->child_phys_hi >> 24) & 0x3;
		a = (((unsigned long)pr->parent_phys_hi << 32UL) |
		     ((unsigned long)pr->parent_phys_lo  <<  0UL));

		switch (type) {
		case 1:
			/* 16-bit IO space, 16MB */
			pbm->io_space.start = a;
			pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
			pbm->io_space.flags = IORESOURCE_IO;
			saw_io = 1;
			break;

		case 2:
			/* 32-bit MEM space, 2GB */
			pbm->mem_space.start = a;
			pbm->mem_space.end = a + (0x80000000UL - 1UL);
			pbm->mem_space.flags = IORESOURCE_MEM;
			saw_mem = 1;
			break;

		case 3:
			/* XXX 64-bit MEM handling XXX */

		default:
			break;
		};
	}

	if (!saw_io || !saw_mem) {
		prom_printf("%s: Fatal error, missing %s PBM range.\n",
			    pbm->name,
			    (!saw_io ? "IO" : "MEM"));
		prom_halt();
	}

	printk("%s: PCI IO[%lx] MEM[%lx]\n",
	       pbm->name,
	       pbm->io_space.start,
	       pbm->mem_space.start);
}

static void pbm_register_toplevel_resources(struct pci_controller_info *p,
					    struct pci_pbm_info *pbm)
{
	pbm->io_space.name = pbm->mem_space.name = pbm->name;

	request_resource(&ioport_resource, &pbm->io_space);
	request_resource(&iomem_resource, &pbm->mem_space);
	pci_register_legacy_regions(&pbm->io_space,
				    &pbm->mem_space);
}

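/* Scan the IOTSB for mappings installed by OBP.  Entries that
 * target pages outside the available physical memory list are kept
 * and marked busy in the arena; the rest are demapped.
 */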
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct pci_iommu *iommu)
{
	struct pci_iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}

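/* Size the IOMMU TSB from the "virtual-dma" property (or the 2GB
 * default), initialize the software arena state, and import any
 * IOTSB entries left live by OBP.
 */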
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct pci_iommu *iommu = pbm->iommu;
	struct property *prop;
	unsigned long num_tsb_entries, sz;
	u32 vdma[2], dma_mask, dma_offset;
	int tsbsize;

	prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
	if (prop) {
		u32 *val = prop->value;

		vdma[0] = val[0];
		vdma[1] = val[1];
	} else {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	dma_mask = vdma[0];
	switch (vdma[1]) {
		case 0x20000000:
			dma_mask |= 0x1fffffff;
			tsbsize = 64;
			break;

		case 0x40000000:
			dma_mask |= 0x3fffffff;
			tsbsize = 128;
			break;

		case 0x80000000:
			dma_mask |= 0x7fffffff;
			tsbsize = 256;
			break;

		default:
			prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
			prom_halt();
	};

	tsbsize *= (8 * 1024);

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map.  */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);
}

static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm)
{
	struct property *prop;
	unsigned int *busrange;

	prop = of_find_property(pbm->prom_node, "bus-range", NULL);
	busrange = prop->value;

	pbm->pci_first_busno = busrange[0];
	pbm->pci_last_busno = busrange[1];
}

static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
{
	struct pci_pbm_info *pbm;
	struct property *prop;
	int len, i;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->parent = p;
	pbm->prom_node = dp;
	pbm->pci_first_slot = 1;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);

	prop = of_find_property(dp, "ranges", &len);
	pbm->pbm_ranges = prop->value;
	pbm->num_pbm_ranges =
		(len / sizeof(struct linux_prom_pci_ranges));

	/* Mask out the top 4 bits of the ranges, leaving the real
	 * physical address.
	 */
	for (i = 0; i < pbm->num_pbm_ranges; i++)
		pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;

	pci_sun4v_determine_mem_io_space(pbm);
	pbm_register_toplevel_resources(p, pbm);

	prop = of_find_property(dp, "interrupt-map", &len);
	pbm->pbm_intmap = prop->value;
	pbm->num_pbm_intmap =
		(len / sizeof(struct linux_prom_pci_intmap));

	prop = of_find_property(dp, "interrupt-map-mask", NULL);
	pbm->pbm_intmask = prop->value;

	pci_sun4v_get_bus_range(pbm);
	pci_sun4v_iommu_init(pbm);

	pdev_htab_populate(pbm);
}

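/* Top-level probe entry.  Sibling PBMs share a controller and have
 * devhandles differing only in bit 6, so try to pair this node with
 * an existing controller before allocating a new one along with the
 * per-cpu map batches and per-PBM IOMMUs.
 */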
void sun4v_pci_init(struct device_node *dp, char *model_name)
{
	struct pci_controller_info *p;
	struct pci_iommu *iommu;
	struct property *prop;
	struct linux_prom64_registers *regs;
	u32 devhandle;
	int i;

	prop = of_find_property(dp, "reg", NULL);
	regs = prop->value;

	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	for (p = pci_controller_root; p; p = p->next) {
		struct pci_pbm_info *pbm;

		if (p->pbm_A.prom_node && p->pbm_B.prom_node)
			continue;

		pbm = (p->pbm_A.prom_node ?
		       &p->pbm_A :
		       &p->pbm_B);

		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(p, dp, devhandle);
			return;
		}
	}

	for_each_possible_cpu(i) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(pci_iommu_batch, i).pglist = (u64 *) page;
	}

	p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	memset(p, 0, sizeof(*p));

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	memset(iommu, 0, sizeof(*iommu));
	p->pbm_A.iommu = iommu;

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	memset(iommu, 0, sizeof(*iommu));
	p->pbm_B.iommu = iommu;

	p->next = pci_controller_root;
	pci_controller_root = p;

	p->index = pci_num_controllers++;
	p->pbms_same_domain = 0;

	p->scan_bus = pci_sun4v_scan_bus;
	p->base_address_update = pci_sun4v_base_address_update;
	p->resource_adjust = pci_sun4v_resource_adjust;
	p->pci_ops = &pci_sun4v_ops;

	/* Like PSYCHO and SCHIZO we have a 2GB aligned area
	 * for memory space.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	pci_sun4v_pbm_init(p, dp, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}