/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

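/* Per-cpu batch of IOMMU mapping requests.  Physical page addresses
 * accumulate in 'pglist' and are handed to the hypervisor by
 * iommu_batch_flush() once the list fills up or the batch is ended.
 */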
struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}

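/* Allocate a run of 'npages' consecutive entries from the arena
 * bitmap, scanning from the hint and wrapping around once before
 * giving up.  Returns the first entry index, or -1 if the arena is
 * exhausted.  Callers serialize on iommu->lock.
 */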
static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}

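/* Give back 'npages' entries starting at 'base' to the arena bitmap. */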
static void arena_free(struct iommu_arena *arena, unsigned long base,
		       unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}

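/* Allocate a zeroed, physically contiguous buffer and map it in the
 * IOMMU via the hypervisor.  Returns the CPU virtual address and
 * stores the bus address in *dma_addrp, or NULL on failure.
 */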
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	unsigned long flags, order, first_page, npages, n;
	void *ret;
	long entry;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	first_page = __get_free_pages(gfp, order);
	if (unlikely(first_page == 0UL))
		return NULL;

	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto arena_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

arena_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

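/* Undo dma_4v_alloc_coherent(): release the IOTSB entries, demap them
 * through the hypervisor and free the backing pages.
 */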
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

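/* Map a single CPU buffer for streaming DMA.  Allocates IOTSB entries
 * and programs them via the per-cpu batch; returns the bus address or
 * DMA_ERROR_CODE on failure.
 */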
static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}

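/* Tear down a streaming mapping created by dma_4v_map_single(). */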
static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	(__pa(sg_virt((SG))))

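/* Program IOTSB entries for a prepared scatterlist.  'nused' is the
 * number of coalesced DMA segments, 'nelems' the number of source
 * scatterlist entries.  Returns 0 on success, -1 if a hypervisor map
 * call fails.
 */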
static long fill_sg(long entry, struct device *dev,
		    struct scatterlist *sg,
		    int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg = sg_next(sg);
				nelems--;
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				long err;

				err = iommu_batch_add(pteval);
				if (unlikely(err < 0L))
					goto iommu_map_failed;

				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg = sg_next(sg);
			nelems--;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (nelems &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg = sg_next(sg);
				nelems--;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg = sg_next(dma_sg);
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);
	return 0;

iommu_map_failed:
	local_irq_restore(flags);
	return -1L;
}

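/* Map a scatterlist for streaming DMA.  Single-entry lists take the
 * dma_4v_map_single() fast path; longer lists are coalesced by
 * prepare_sg() and their mappings created by fill_sg().  Returns the
 * number of DMA segments, or 0 on failure.
 */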
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, prot;
	u32 dma_base;
	struct scatterlist *sgtmp;
	long entry, err;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			dma_4v_map_single(dev, sg_virt(sglist),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	iommu = dev->archdata.iommu;
	
	if (unlikely(direction == DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp = sg_next(sgtmp);
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	err = fill_sg(entry, dev, sglist, used, nelems, prot);
	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

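/* Release the IOTSB entries backing a scatterlist mapped by
 * dma_4v_map_sg() and demap them through the hypervisor.
 */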
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, i, npages;
	struct scatterlist *sg, *sgprv;
	long entry;
	u32 devhandle, bus_addr;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	
	bus_addr = sglist->dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;

		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4v_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	/* Nothing to do... */
}

static void dma_4v_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	/* Nothing to do... */
}

const struct dma_ops sun4v_dma_ops = {
	.alloc_coherent			= dma_4v_alloc_coherent,
	.free_coherent			= dma_4v_free_coherent,
	.map_single			= dma_4v_map_single,
	.unmap_single			= dma_4v_unmap_single,
	.map_sg				= dma_4v_map_sg,
	.unmap_sg			= dma_4v_unmap_sg,
	.sync_single_for_cpu		= dma_4v_sync_single_for_cpu,
	.sync_sg_for_cpu		= dma_4v_sync_sg_for_cpu,
};

static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->prom_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm);

	/* XXX register error interrupt handlers XXX */
}

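/* Scan the IOTSB for mappings already installed by the firmware.
 * Entries that map physically available memory are demapped; all
 * others are preserved by marking them busy in the arena.  Returns
 * the number of entries preserved.
 */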
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}

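/* Set up software IOMMU state for one PBM: parse the "virtual-dma"
 * property (falling back to defaults), allocate the arena bitmap and
 * import any TSB entries left behind by the firmware.
 */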
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	struct property *prop;
	unsigned long num_tsb_entries, sz, tsbsize;
	u32 vdma[2], dma_mask, dma_offset;

	prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
	if (prop) {
		u32 *val = prop->value;

		vdma[0] = val[0];
		vdma[1] = val[1];
	} else {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n",
			    vdma[0], vdma[1]);
		prom_halt();
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
	tsbsize = num_tsb_entries * sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map.  */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 * 	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

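/* Pull one entry off an MSI event queue at byte offset *head.
 * Returns 1 and advances *head when an MSI was consumed, 0 when the
 * queue is empty, or a negative errno on error.
 */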
static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset  */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

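/* Allocate the MSI event queues and register each one with the
 * hypervisor, reading the configuration back to verify it stuck.
 */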
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

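/* Bind an MSI event queue to a Linux virtual IRQ and mark the queue
 * idle and valid.  Returns the virtual IRQ, or a negative errno.
 */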
static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!virt_irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;
	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;

	return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	=	pci_sun4v_get_head,
	.dequeue_msi	=	pci_sun4v_dequeue_msi,
	.set_head	=	pci_sun4v_set_head,
	.msi_setup	=	pci_sun4v_msi_setup,
	.msi_teardown	=	pci_sun4v_msi_teardown,
	.msiq_alloc	=	pci_sun4v_msiq_alloc,
	.msiq_free	=	pci_sun4v_msiq_free,
	.msiq_build_irq	=	pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

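/* Initialize one PCI Bus Module: link it onto the global PBM list,
 * record its OF node and devhandle, then probe I/O and memory ranges,
 * the IOMMU and MSI support.
 */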
static void __init pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
{
	struct pci_pbm_info *pbm;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	pbm->scan_bus = pci_sun4v_scan_bus;
	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->parent = p;
	pbm->prom_node = dp;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);
	pci_sun4v_iommu_init(pbm);
	pci_sun4v_msi_init(pbm);
}

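/* Top-level probe for a SUN4V PCI controller node.  Negotiates the
 * PCI hypervisor API group on first use, pairs a new node with an
 * already-probed sibling PBM (devhandle ^ 0x40) when possible, and
 * otherwise allocates a fresh controller along with the per-cpu
 * IOMMU batch page lists.
 */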
void __init sun4v_pci_init(struct device_node *dp, char *model_name)
{
	static int hvapi_negotiated = 0;
	struct pci_controller_info *p;
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct property *prop;
	struct linux_prom64_registers *regs;
	u32 devhandle;
	int i;

	if (!hvapi_negotiated++) {
		int err = sun4v_hvapi_register(HV_GRP_PCI,
					       vpci_major,
					       &vpci_minor);

		if (err) {
			prom_printf("SUN4V_PCI: Could not register hvapi, "
				    "err=%d\n", err);
			prom_halt();
		}
		printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	prop = of_find_property(dp, "reg", NULL);
	regs = prop->value;

	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(pbm->parent, dp, devhandle);
			return;
		}
	}

	for_each_possible_cpu(i) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(iommu_batch, i).pglist = (u64 *) page;
	}

	p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_A.iommu = iommu;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_B.iommu = iommu;

	pci_sun4v_pbm_init(p, dp, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}